From 9a5f3a20355ae7bf95bc8890d568f5589cfdb1e4 Mon Sep 17 00:00:00 2001
From: jkneubuh <86427252+jkneubuh@users.noreply.github.com>
Date: Tue, 7 Jun 2022 14:16:43 -0400
Subject: [PATCH] Initial commit

Signed-off-by: Josh Kneubuhl
---
 .github/workflows/image-build-pr.yaml | 16 + .github/workflows/image-build.yaml | 20 + .github/workflows/integration-tests.yaml | 81 + .github/workflows/unit-tests.yaml | 43 + .gitignore | 95 + Dockerfile | 36 + LICENSE | 202 ++ Makefile | 232 ++ PROJECT | 33 + README.md | 77 + api/addtoscheme_ibp_v1beta1.go | 28 + api/apis.go | 31 + api/v1beta1/common.go | 59 + api/v1beta1/common_struct.go | 326 +++ api/v1beta1/groupversion_info.go | 38 + api/v1beta1/ibpca.go | 274 ++ api/v1beta1/ibpca_types.go | 330 +++ api/v1beta1/ibpconsole.go | 130 + api/v1beta1/ibpconsole_types.go | 405 +++ api/v1beta1/ibporderer.go | 300 +++ api/v1beta1/ibporderer_types.go | 415 +++ api/v1beta1/ibppeer.go | 326 +++ api/v1beta1/ibppeer_types.go | 490 ++++ api/v1beta1/zz_generated.deepcopy.go | 1965 ++++++++++++++ boilerplate/boilerplate.go.txt | 17 + boilerplate/boilerplate.sh.txt | 17 + build/entrypoint | 31 + build/user_setup | 43 + bundle.Dockerfile | 20 + ...source-operator.clusterserviceversion.yaml | 1889 ++++++++++++++ bundle/manifests/ibp.com_ibpcas.yaml | 388 +++ bundle/manifests/ibp.com_ibpconsoles.yaml | 803 ++++++ bundle/manifests/ibp.com_ibporderers.yaml | 926 +++++++ bundle/manifests/ibp.com_ibppeers.yaml | 862 ++++++ ...er-manager-metrics-service_v1_service.yaml | 16 + ...rization.k8s.io_v1_clusterrolebinding.yaml | 13 + ...ole_rbac.authorization.k8s.io_v1_role.yaml | 39 + ...c.authorization.k8s.io_v1_rolebinding.yaml | 13 + ...c.authorization.k8s.io_v1_clusterrole.yaml | 187 ++ ...c.authorization.k8s.io_v1_clusterrole.yaml | 10 + ...horization.k8s.io_v1beta1_clusterrole.yaml | 10 + ...rization.k8s.io_v1_clusterrolebinding.yaml | 13 + ...c.authorization.k8s.io_v1_clusterrole.yaml | 18 + ...rization.k8s.io_v1_clusterrolebinding.yaml | 13 + bundle/metadata/annotations.yaml | 14 + cmd/crd/main.go | 38 + config/certmanager/certificate.yaml | 26 + config/certmanager/kustomization.yaml | 5 + config/certmanager/kustomizeconfig.yaml | 16 + config/crd/bases/ibp.com_ibpcas.yaml | 389 +++ config/crd/bases/ibp.com_ibpconsoles.yaml | 804 ++++++ config/crd/bases/ibp.com_ibporderers.yaml | 927 +++++++ config/crd/bases/ibp.com_ibppeers.yaml | 863 ++++++ config/crd/kustomization.yaml | 30 + config/crd/kustomizeconfig.yaml | 17 + config/crd/patches/cainjection_in_ibpcas.yaml | 8 + .../patches/cainjection_in_ibpconsoles.yaml | 8 + .../patches/cainjection_in_ibporderers.yaml | 8 + .../crd/patches/cainjection_in_ibppeers.yaml | 8 + config/crd/patches/webhook_in_ibpcas.yaml | 17 + .../crd/patches/webhook_in_ibpconsoles.yaml | 17 + .../crd/patches/webhook_in_ibporderers.yaml | 17 + config/crd/patches/webhook_in_ibppeers.yaml | 17 + config/default/kustomization.yaml | 66 + config/default/manager_auth_proxy_patch.yaml | 25 + config/default/manager_webhook_patch.yaml | 23 + config/default/webhookcainjection_patch.yaml | 15 + .../ingress/k3s/ingress-nginx-controller.yaml | 39 + config/ingress/k3s/kustomization.yaml | 25 + .../kind/ingress-nginx-controller.yaml | 39 + config/ingress/kind/kustomization.yaml | 25 + config/ingress/kustomization.yaml | 0 config/manager/kustomization.yaml | 8 + config/manager/manager.yaml | 129 + ...source-operator.clusterserviceversion.yaml | 1887 ++++++++++++++ config/manifests/kustomization.yaml | 26 + config/prometheus/kustomization.yaml | 2 + 
config/prometheus/monitor.yaml | 20 + .../rbac/auth_proxy_client_clusterrole.yaml | 7 + config/rbac/auth_proxy_role.yaml | 13 + config/rbac/auth_proxy_role_binding.yaml | 12 + config/rbac/auth_proxy_service.yaml | 14 + config/rbac/ibpca_editor_role.yaml | 24 + config/rbac/ibpca_viewer_role.yaml | 20 + config/rbac/ibpconsole_editor_role.yaml | 24 + config/rbac/ibpconsole_viewer_role.yaml | 20 + config/rbac/ibporderer_editor_role.yaml | 24 + config/rbac/ibporderer_viewer_role.yaml | 20 + config/rbac/ibppeer_editor_role.yaml | 24 + config/rbac/ibppeer_viewer_role.yaml | 20 + config/rbac/kustomization.yaml | 13 + config/rbac/leader_election_role.yaml | 39 + config/rbac/leader_election_role_binding.yaml | 12 + config/rbac/role.yaml | 204 ++ config/rbac/role_binding.yaml | 30 + config/rbac/service_account.yaml | 24 + config/samples/ibp.com_v1beta1_ibpca.yaml | 35 + .../samples/ibp.com_v1beta1_ibpconsole.yaml | 40 + .../samples/ibp.com_v1beta1_ibporderer.yaml | 53 + config/samples/ibp.com_v1beta1_ibppeer.yaml | 57 + config/samples/kustomization.yaml | 0 config/scorecard/.osdk-scorecard.yaml | 17 + config/scorecard/kustomization.yaml | 0 config/webhook/kustomization.yaml | 6 + config/webhook/kustomizeconfig.yaml | 25 + config/webhook/service.yaml | 12 + controllers/add_ibpca.go | 28 + controllers/add_ibpconsole.go | 28 + controllers/add_ibporderer.go | 28 + controllers/add_ibppeer.go | 28 + controllers/common/common.go | 145 + controllers/controller.go | 37 + controllers/ibpca/ibpca_controller.go | 853 ++++++ controllers/ibpca/ibpca_controller_test.go | 355 +++ controllers/ibpca/ibpca_suite_test.go | 31 + controllers/ibpca/mocks/careconcile.go | 118 + controllers/ibpca/predicate.go | 154 ++ controllers/ibpca/predicate_test.go | 496 ++++ .../ibpconsole/ibpconsole_controller.go | 636 +++++ .../ibpconsole/ibpconsole_controller_test.go | 234 ++ .../ibpconsole/ibpconsole_suite_test.go | 31 + .../ibpconsole/mocks/consolereconcile.go | 118 + .../ibporderer/ibporderer_controller.go | 1118 ++++++++ .../ibporderer/ibporderer_controller_test.go | 794 ++++++ .../ibporderer/ibporderer_suite_test.go | 31 + .../ibporderer/mocks/ordererreconcile.go | 118 + controllers/ibporderer/predicate.go | 324 +++ controllers/ibporderer/predicate_test.go | 291 +++ controllers/ibppeer/ibppeer_controller.go | 939 +++++++ .../ibppeer/ibppeer_controller_test.go | 919 +++++++ controllers/ibppeer/ibppeer_suite_test.go | 31 + controllers/ibppeer/mocks/peerreconcile.go | 118 + controllers/ibppeer/predicate.go | 293 +++ controllers/mocks/client.go | 746 ++++++ controllers/suite_test.go | 92 + defaultconfig/ca/ca.yaml | 516 ++++ defaultconfig/ca/tlsca.yaml | 500 ++++ defaultconfig/console/console.go | 36 + defaultconfig/orderer/configtx.yaml | 240 ++ defaultconfig/orderer/orderer.yaml | 402 +++ defaultconfig/orderer/ouconfig-inter.yaml | 33 + defaultconfig/orderer/ouconfig.yaml | 33 + defaultconfig/orderer/v2/orderer.yaml | 374 +++ defaultconfig/orderer/v24/orderer.yaml | 420 +++ defaultconfig/peer/core.yaml | 728 ++++++ defaultconfig/peer/ouconfig-inter.yaml | 33 + defaultconfig/peer/ouconfig.yaml | 33 + defaultconfig/peer/v2/core.yaml | 799 ++++++ definitions/ca/deployment.yaml | 139 + definitions/ca/ingress.yaml | 26 + definitions/ca/ingressv1beta1.yaml | 26 + definitions/ca/pvc.yaml | 28 + definitions/ca/role.yaml | 37 + definitions/ca/rolebinding.yaml | 29 + definitions/ca/route.yaml | 33 + definitions/ca/service.yaml | 35 + definitions/ca/serviceaccount.yaml | 25 + definitions/console/configmap.yaml | 29 + 
definitions/console/console-configmap.yaml | 84 + definitions/console/deployer-configmap.yaml | 197 ++ definitions/console/deployer-service.yaml | 31 + definitions/console/deployment.yaml | 204 ++ definitions/console/ingress.yaml | 26 + definitions/console/ingressv1beta1.yaml | 26 + .../console/networkpolicy-denyall.yaml | 25 + .../console/networkpolicy-ingress.yaml | 54 + definitions/console/pvc.yaml | 28 + definitions/console/role.yaml | 79 + definitions/console/rolebinding.yaml | 29 + definitions/console/route.yaml | 33 + definitions/console/service.yaml | 31 + definitions/console/serviceaccount.yaml | 25 + definitions/orderer/configmap.yaml | 45 + definitions/orderer/deployment.yaml | 226 ++ definitions/orderer/ingress.yaml | 25 + definitions/orderer/ingressv1beta1.yaml | 25 + definitions/orderer/orderernode.yaml | 84 + definitions/orderer/pvc.yaml | 28 + definitions/orderer/role.yaml | 36 + definitions/orderer/rolebinding.yaml | 28 + definitions/orderer/route.yaml | 33 + .../orderer/saas-ingress-community.yaml | 42 + definitions/orderer/saas-ingress.yaml | 47 + .../saas-ingressv1beta1-community.yaml | 38 + definitions/orderer/saas-ingressv1beta1.yaml | 43 + definitions/orderer/service.yaml | 39 + definitions/orderer/serviceaccount.yaml | 25 + definitions/peer/chaincode-launcher.yaml | 64 + definitions/peer/couchdb-init.yaml | 50 + definitions/peer/couchdb-pvc.yaml | 28 + definitions/peer/couchdb.yaml | 64 + definitions/peer/deployment.yaml | 313 +++ definitions/peer/fluentd-configmap.yaml | 32 + definitions/peer/ingress.yaml | 25 + definitions/peer/ingressv1beta1.yaml | 25 + definitions/peer/pvc.yaml | 28 + definitions/peer/role.yaml | 41 + definitions/peer/rolebinding.yaml | 29 + definitions/peer/route.yaml | 33 + definitions/peer/saas-ingress-community.yaml | 42 + definitions/peer/saas-ingress.yaml | 47 + .../peer/saas-ingressv1beta1-community.yaml | 38 + definitions/peer/saas-ingressv1beta1.yaml | 43 + definitions/peer/service.yaml | 36 + definitions/peer/serviceaccount.yaml | 25 + docker-entrypoint.sh | 45 + docs/CONTRIBUTING.md | 3 + docs/DEVELOPING.md | 143 + docs/images/fabric-operator-components.png | Bin 0 -> 116040 bytes .../images/fabric-operator-sample-network.png | Bin 0 -> 97281 bytes go.mod | 153 ++ go.sum | 1025 ++++++++ integration/actions/ca/ca_suite_test.go | 174 ++ integration/actions/ca/ca_test.go | 405 +++ .../actions/orderer/orderer_suite_test.go | 269 ++ integration/actions/orderer/orderer_test.go | 527 ++++ integration/actions/peer/peer_suite_test.go | 265 ++ integration/actions/peer/peer_test.go | 595 +++++ integration/actions/peer/reenroll_test.go | 123 + integration/autorenew/autorenew_suite_test.go | 327 +++ integration/autorenew/autorenew_test.go | 213 ++ integration/ca/ca_suite_test.go | 146 ++ integration/ca/ca_test.go | 788 ++++++ .../cclauncher/cclauncher_suite_test.go | 278 ++ integration/cclauncher/cclauncher_test.go | 98 + integration/console/console_suite_test.go | 93 + integration/console/console_test.go | 689 +++++ integration/e2ev2/.gitignore | 2 + integration/e2ev2/ca_test.go | 90 + integration/e2ev2/config.yaml | 33 + integration/e2ev2/console_test.go | 235 ++ integration/e2ev2/e2ev2_suite_test.go | 252 ++ integration/e2ev2/e2ev2_test.go | 203 ++ integration/e2ev2/orderer_test.go | 381 +++ integration/e2ev2/peer_test.go | 204 ++ integration/helper/ca.go | 184 ++ integration/helper/crspecs.go | 298 +++ integration/helper/job.go | 44 + integration/helper/orderer.go | 91 + integration/helper/peer.go | 114 + integration/helper/session.go | 81 + 
integration/images.go | 45 + integration/init/init_suite_test.go | 31 + integration/init/init_test.go | 186 ++ integration/init/orderer_test.go | 310 +++ integration/init/peer_test.go | 337 +++ integration/integration.go | 491 ++++ integration/kind-config.yaml | 23 + .../migration/fabric/fabric_suite_test.go | 239 ++ integration/migration/fabric/orderer_test.go | 194 ++ integration/migration/fabric/peer_test.go | 214 ++ integration/migration/migration_suite_test.go | 125 + integration/migration/migration_test.go | 1003 +++++++ integration/nativeresourcepoller.go | 375 +++ integration/nginx-deployment.yaml | 127 + integration/operator.go | 207 ++ .../operatorrestart_suite_test.go | 370 +++ .../operatorrestart/operatorrestart_test.go | 114 + integration/orderer/orderer_suite_test.go | 105 + integration/orderer/orderer_test.go | 1404 ++++++++++ integration/peer/peer_suite_test.go | 170 ++ integration/peer/peer_test.go | 906 +++++++ .../restartmgr/restartmgr_suite_test.go | 321 +++ integration/restartmgr/restartmgr_test.go | 579 ++++ main.go | 198 ++ operatorconfig/config.go | 55 + operatorconfig/operator.go | 201 ++ operatorconfig/versions.go | 75 + pkg/action/action.go | 95 + pkg/action/action_suite_test.go | 31 + pkg/action/action_test.go | 100 + pkg/action/enroll.go | 80 + pkg/action/enroll_test.go | 62 + pkg/action/mocks/deploymentreset.go | 264 ++ pkg/action/mocks/enrollinstance.go | 2321 +++++++++++++++++ pkg/action/mocks/reenroller.go | 117 + pkg/action/mocks/reenrollinstance.go | 1910 ++++++++++++++ pkg/action/mocks/upgradeinstance.go | 2045 +++++++++++++++ pkg/action/upgradedbs.go | 528 ++++ pkg/action/upgradedbs_test.go | 173 ++ pkg/apis/ca/v1/ca.go | 374 +++ pkg/apis/ca/v1/functions.go | 40 + pkg/apis/common/common.go | 123 + pkg/apis/console/v1/console.go | 121 + pkg/apis/console/v1/zz_generated.deepcopy.go | 106 + pkg/apis/deployer/deployer.go | 549 ++++ pkg/apis/orderer/v1/orderer.go | 188 ++ pkg/apis/orderer/v2/orderer.go | 50 + pkg/apis/orderer/v24/orderer.go | 67 + pkg/apis/peer/v1/peer.go | 357 +++ pkg/apis/peer/v2/peer.go | 222 ++ pkg/certificate/certificate.go | 437 ++++ pkg/certificate/certificate_suite_test.go | 31 + pkg/certificate/certificate_test.go | 362 +++ pkg/certificate/mocks/reenroller.go | 108 + pkg/certificate/reenroller/client.go | 31 + pkg/certificate/reenroller/client_pkcs11.go | 57 + .../reenroller/hsmdaemonreenroller.go | 422 +++ pkg/certificate/reenroller/hsmreenroller.go | 492 ++++ pkg/certificate/reenroller/mocks/identity.go | 249 ++ pkg/certificate/reenroller/reenroller.go | 392 +++ .../reenroller/reenroller_suite_test.go | 31 + pkg/certificate/reenroller/reenroller_test.go | 221 ++ pkg/client/client.go | 74 + pkg/client/client_suite_test.go | 31 + pkg/client/client_test.go | 34 + pkg/command/command_suite_test.go | 31 + pkg/command/crdinstall.go | 60 + pkg/command/mocks/reader.go | 196 ++ pkg/command/operator.go | 293 +++ pkg/command/operator_test.go | 83 + pkg/controller/mocks/client.go | 746 ++++++ pkg/crd/crd_suite_test.go | 31 + pkg/crd/manager.go | 94 + pkg/crd/manager_test.go | 75 + pkg/crd/mocks/client.go | 117 + pkg/global/config.go | 64 + pkg/global/config_test.go | 163 ++ pkg/global/global_suite_test.go | 31 + pkg/initializer/ca/bccsp/config.go | 45 + pkg/initializer/ca/bccsp/configpkcs11.go | 63 + pkg/initializer/ca/ca.go | 476 ++++ pkg/initializer/ca/ca_suite_test.go | 33 + pkg/initializer/ca/ca_test.go | 298 +++ pkg/initializer/ca/config/ca.go | 71 + pkg/initializer/ca/config/ca_test.go | 114 + pkg/initializer/ca/config/config.go | 364 
+++ .../ca/config/config_suite_test.go | 31 + pkg/initializer/ca/config/config_test.go | 112 + pkg/initializer/ca/config/db.go | 135 + pkg/initializer/ca/config/db_test.go | 136 + pkg/initializer/ca/config/intermediate.go | 76 + .../ca/config/intermediate_test.go | 88 + pkg/initializer/ca/config/operations.go | 81 + pkg/initializer/ca/config/operations_test.go | 91 + pkg/initializer/ca/config/tls.go | 83 + pkg/initializer/ca/config/tls_test.go | 91 + pkg/initializer/ca/hsm.go | 551 ++++ pkg/initializer/ca/hsm_test.go | 322 +++ pkg/initializer/ca/hsmdaemon.go | 325 +++ pkg/initializer/ca/initializer.go | 137 + pkg/initializer/ca/initializer_test.go | 120 + pkg/initializer/ca/mocks/client.go | 746 ++++++ pkg/initializer/ca/mocks/config.go | 701 +++++ pkg/initializer/ca/mocks/ibpca.go | 853 ++++++ pkg/initializer/ca/sw.go | 74 + pkg/initializer/ca/tls/tls.go | 178 ++ pkg/initializer/ca/tls/tls_suite_test.go | 31 + pkg/initializer/ca/tls/tls_test.go | 112 + pkg/initializer/common/common.go | 253 ++ pkg/initializer/common/common_suite_test.go | 31 + pkg/initializer/common/common_test.go | 127 + .../common/config/config_suite_test.go | 31 + pkg/initializer/common/config/config_test.go | 162 ++ pkg/initializer/common/config/crypto.go | 149 ++ pkg/initializer/common/config/hsmconfig.go | 197 ++ .../common/config/hsmconfig_test.go | 145 + pkg/initializer/common/config/hsmdaemon.go | 118 + pkg/initializer/common/config/mocks/crypto.go | 237 ++ pkg/initializer/common/config/nodeou.go | 57 + pkg/initializer/common/enroller/client.go | 31 + .../common/enroller/client_pkcs11.go | 57 + pkg/initializer/common/enroller/enroller.go | 141 + .../common/enroller/enroller_suite_test.go | 33 + .../common/enroller/enroller_test.go | 138 + .../common/enroller/fabcaclient.go | 141 + .../common/enroller/fabcaclient_test.go | 72 + pkg/initializer/common/enroller/factory.go | 76 + .../common/enroller/factory_test.go | 78 + .../common/enroller/hsmdaemonenroller.go | 346 +++ .../common/enroller/hsmdaemonenroller_test.go | 385 +++ .../common/enroller/hsmenroller.go | 456 ++++ .../common/enroller/hsmenroller_test.go | 293 +++ .../common/enroller/hsmproxyenroller.go | 62 + .../common/enroller/mocks/caclient.go | 454 ++++ .../common/enroller/mocks/client.go | 746 ++++++ .../common/enroller/mocks/cryptoenroller.go | 249 ++ .../common/enroller/mocks/cryptoinstance.go | 2321 +++++++++++++++++ .../common/enroller/mocks/hsmcaclient.go | 348 +++ .../common/enroller/mocks/instance.go | 1989 ++++++++++++++ pkg/initializer/common/enroller/swenroller.go | 162 ++ .../common/enroller/swenroller_test.go | 70 + .../common/mocks/cryptovalidator.go | 305 +++ pkg/initializer/common/mspparser/mspparser.go | 110 + .../common/mspparser/mspparser_suite_test.go | 31 + .../common/mspparser/mspparser_test.go | 64 + .../common/secretmanager/secretmanager.go | 428 +++ .../secretmanager/secretmanager_suite_test.go | 31 + .../secretmanager/secretmanager_test.go | 194 ++ pkg/initializer/cryptogen/bccsp.go | 80 + pkg/initializer/cryptogen/mocks/config.go | 172 ++ pkg/initializer/cryptogen/mocks/instance.go | 2050 +++++++++++++++ .../orderer/config/v1/config_suite_test.go | 31 + .../orderer/config/v1/config_test.go | 200 ++ pkg/initializer/orderer/config/v1/io.go | 61 + pkg/initializer/orderer/config/v1/orderer.go | 174 ++ .../orderer/config/v2/config_suite_test.go | 31 + .../orderer/config/v2/config_test.go | 198 ++ pkg/initializer/orderer/config/v2/io.go | 61 + pkg/initializer/orderer/config/v2/orderer.go | 141 + 
.../orderer/config/v24/config_suite_test.go | 31 + .../orderer/config/v24/config_test.go | 198 ++ pkg/initializer/orderer/config/v24/io.go | 61 + pkg/initializer/orderer/config/v24/orderer.go | 140 + pkg/initializer/orderer/configtx/config.go | 157 ++ pkg/initializer/orderer/configtx/configtx.go | 198 ++ .../orderer/configtx/configtx_suite_test.go | 31 + .../orderer/configtx/configtx_test.go | 53 + pkg/initializer/orderer/configtx/encoder.go | 205 ++ pkg/initializer/orderer/configtx/profile.go | 340 +++ .../orderer/configtx/profile_test.go | 188 ++ pkg/initializer/orderer/initializer.go | 496 ++++ pkg/initializer/orderer/initializer_test.go | 220 ++ pkg/initializer/orderer/mocks/ibporderer.go | 247 ++ pkg/initializer/orderer/orderer.go | 73 + pkg/initializer/orderer/orderer_suite_test.go | 31 + .../commoncore/commoncore_suite_test.go | 30 + .../peer/config/commoncore/commoncore_test.go | 257 ++ .../peer/config/commoncore/core.go | 92 + .../config/commoncore/testdata/test_core.yaml | 706 +++++ .../testdata/test_core_no_change.yaml | 5 + .../testdata/test_core_no_peer.yaml | 294 +++ pkg/initializer/peer/config/v1/config.go | 163 ++ .../peer/config/v1/config_suite_test.go | 31 + pkg/initializer/peer/config/v1/config_test.go | 662 +++++ .../peer/config/v1/deliveryclient.go | 66 + pkg/initializer/peer/config/v1/io.go | 63 + pkg/initializer/peer/config/v2/config.go | 197 ++ pkg/initializer/peer/config/v2/config_test.go | 129 + .../peer/config/v2/v2_suite_test.go | 31 + pkg/initializer/peer/coreconfigmap.go | 218 ++ pkg/initializer/peer/coreconfigmap_test.go | 143 + pkg/initializer/peer/initializer.go | 327 +++ pkg/initializer/peer/initializer_test.go | 496 ++++ pkg/initializer/peer/mocks/client.go | 746 ++++++ pkg/initializer/peer/mocks/ibppeer.go | 312 +++ pkg/initializer/peer/peer.go | 84 + pkg/initializer/peer/peer_suite_test.go | 51 + pkg/initializer/peer/peer_test.go | 91 + pkg/initializer/validator/validator.go | 237 ++ .../validator/validator_suite_test.go | 31 + pkg/initializer/validator/validator_test.go | 148 ++ pkg/k8s/clientset/client.go | 68 + pkg/k8s/controllerclient/client.go | 296 +++ pkg/k8s/controllerclient/client_structs.go | 58 + .../configmap/configmap_suite_test.go | 31 + pkg/manager/resources/configmap/manager.go | 189 ++ .../resources/configmap/manager_test.go | 106 + pkg/manager/resources/container/container.go | 237 ++ .../container/container_suite_test.go | 31 + .../resources/container/container_test.go | 66 + .../resources/deployment/deployment.go | 237 ++ .../deployment/deployment_suite_test.go | 31 + pkg/manager/resources/deployment/manager.go | 419 +++ .../resources/deployment/manager_test.go | 205 ++ .../resources/ingress/ingress_suite_test.go | 31 + pkg/manager/resources/ingress/manager.go | 264 ++ pkg/manager/resources/ingress/manager_test.go | 146 ++ .../ingressv1beta1/ingress_suite_test.go | 31 + .../resources/ingressv1beta1/manager.go | 264 ++ .../resources/ingressv1beta1/manager_test.go | 146 ++ pkg/manager/resources/job/job.go | 366 +++ pkg/manager/resources/job/job_suite_test.go | 33 + pkg/manager/resources/job/job_test.go | 265 ++ pkg/manager/resources/job/mocks/client.go | 746 ++++++ pkg/manager/resources/manager/manager.go | 188 ++ .../resources/mocks/resource_manager.go | 603 +++++ pkg/manager/resources/orderernode/manager.go | 307 +++ .../resources/orderernode/manager_test.go | 170 ++ .../orderernode/orderernode_suite_test.go | 31 + pkg/manager/resources/pv/manager.go | 158 ++ pkg/manager/resources/pv/manager_test.go | 106 + 
pkg/manager/resources/pv/pvc_suite_test.go | 31 + pkg/manager/resources/pvc/manager.go | 172 ++ pkg/manager/resources/pvc/manager_test.go | 106 + pkg/manager/resources/pvc/pvc_suite_test.go | 31 + pkg/manager/resources/resources.go | 45 + pkg/manager/resources/role/manager.go | 171 ++ pkg/manager/resources/role/manager_test.go | 106 + pkg/manager/resources/role/role_suite_test.go | 31 + pkg/manager/resources/rolebinding/manager.go | 171 ++ .../resources/rolebinding/manager_test.go | 106 + .../rolebinding/rolebinding_suite_test.go | 31 + pkg/manager/resources/route/manager.go | 182 ++ pkg/manager/resources/route/manager_test.go | 106 + .../resources/route/route_suite_test.go | 31 + pkg/manager/resources/service/manager.go | 182 ++ pkg/manager/resources/service/manager_test.go | 106 + .../resources/service/service_suite_test.go | 31 + .../resources/serviceaccount/manager.go | 172 ++ .../resources/serviceaccount/manager_test.go | 106 + .../serviceaccount_suite_test.go | 31 + pkg/migrator/initsecret/migrator.go | 113 + pkg/migrator/migrator.go | 55 + pkg/migrator/peer/fabric/fabric_suite_test.go | 31 + pkg/migrator/peer/fabric/migrator.go | 72 + pkg/migrator/peer/fabric/migrator_test.go | 86 + pkg/migrator/peer/fabric/mocks/migrator.go | 339 +++ .../peer/fabric/v2/mocks/configmapmanager.go | 195 ++ .../peer/fabric/v2/mocks/deploymentmanager.go | 338 +++ pkg/migrator/peer/fabric/v2/peer.go | 286 ++ pkg/migrator/peer/fabric/v2/peer_test.go | 367 +++ pkg/migrator/peer/fabric/v2/v2_suite_test.go | 31 + pkg/migrator/peer/peer_suite_test.go | 31 + pkg/offering/base/ca/ca.go | 1042 ++++++++ pkg/offering/base/ca/ca_suite_test.go | 46 + pkg/offering/base/ca/ca_test.go | 697 +++++ pkg/offering/base/ca/initialize.go | 517 ++++ pkg/offering/base/ca/initialize_test.go | 205 ++ .../base/ca/mocks/certificate_manager.go | 380 +++ pkg/offering/base/ca/mocks/initialize.go | 520 ++++ pkg/offering/base/ca/mocks/initializer.go | 206 ++ pkg/offering/base/ca/mocks/restart_manager.go | 335 +++ pkg/offering/base/ca/mocks/update.go | 752 ++++++ pkg/offering/base/ca/override/deployment.go | 369 +++ .../base/ca/override/deployment_test.go | 890 +++++++ pkg/offering/base/ca/override/override.go | 167 ++ .../base/ca/override/override_suite_test.go | 31 + .../base/ca/override/override_test.go | 119 + pkg/offering/base/ca/override/overridecm.go | 64 + pkg/offering/base/ca/override/pvc.go | 80 + pkg/offering/base/ca/override/pvc_test.go | 92 + pkg/offering/base/ca/override/role.go | 46 + pkg/offering/base/ca/override/rolebinding.go | 46 + pkg/offering/base/ca/override/service.go | 80 + pkg/offering/base/ca/override/service_test.go | 71 + .../base/ca/override/serviceaccount.go | 58 + .../base/ca/override/serviceaccount_test.go | 69 + pkg/offering/base/console/console.go | 682 +++++ .../base/console/console_suite_test.go | 31 + pkg/offering/base/console/console_test.go | 342 +++ .../base/console/mocks/restart_manager.go | 261 ++ pkg/offering/base/console/mocks/update.go | 362 +++ .../base/console/override/consolecm.go | 206 ++ .../base/console/override/consolecm_test.go | 185 ++ .../base/console/override/deployercm.go | 197 ++ .../base/console/override/deployercm_test.go | 746 ++++++ .../base/console/override/deployerservice.go | 63 + .../console/override/deployerservice_test.go | 74 + .../base/console/override/deployment.go | 458 ++++ .../base/console/override/deployment_test.go | 492 ++++ pkg/offering/base/console/override/envcm.go | 80 + .../base/console/override/envcm_test.go | 86 + .../base/console/override/override.go | 
26 + .../console/override/override_suite_test.go | 31 + pkg/offering/base/console/override/pvc.go | 86 + .../base/console/override/pvc_test.go | 99 + pkg/offering/base/console/override/service.go | 74 + .../base/console/override/service_test.go | 99 + .../base/console/override/serviceaccount.go | 58 + .../console/override/serviceaccount_test.go | 82 + .../base/orderer/mocks/certificate_manager.go | 379 +++ .../base/orderer/mocks/deployment_manager.go | 682 +++++ .../orderer/mocks/initializeibporderer.go | 1376 ++++++++++ .../base/orderer/mocks/node_manager.go | 116 + .../base/orderer/mocks/restart_manager.go | 486 ++++ pkg/offering/base/orderer/mocks/update.go | 1533 +++++++++++ pkg/offering/base/orderer/node.go | 1719 ++++++++++++ pkg/offering/base/orderer/node_test.go | 724 +++++ pkg/offering/base/orderer/orderer.go | 883 +++++++ .../base/orderer/orderer_suite_test.go | 46 + pkg/offering/base/orderer/orderer_test.go | 216 ++ .../base/orderer/override/deployment.go | 451 ++++ .../base/orderer/override/deployment_test.go | 593 +++++ pkg/offering/base/orderer/override/envcm.go | 107 + .../base/orderer/override/orderernode.go | 70 + .../base/orderer/override/override.go | 30 + .../orderer/override/override_suite_test.go | 31 + .../base/orderer/override/override_test.go | 154 ++ pkg/offering/base/orderer/override/pvc.go | 80 + .../base/orderer/override/pvc_test.go | 99 + pkg/offering/base/orderer/override/service.go | 53 + .../base/orderer/override/service_test.go | 65 + .../base/orderer/override/serviceaccount.go | 58 + .../orderer/override/serviceaccount_test.go | 80 + .../base/peer/mocks/certificate_manager.go | 379 +++ .../base/peer/mocks/deployment_manager.go | 827 ++++++ pkg/offering/base/peer/mocks/initializer.go | 1043 ++++++++ .../base/peer/mocks/restart_manager.go | 486 ++++ pkg/offering/base/peer/mocks/update.go | 1637 ++++++++++++ pkg/offering/base/peer/override/deployment.go | 971 +++++++ .../base/peer/override/deployment_test.go | 1111 ++++++++ pkg/offering/base/peer/override/override.go | 33 + .../base/peer/override/override_suite_test.go | 31 + .../base/peer/override/override_test.go | 74 + pkg/offering/base/peer/override/pvc.go | 80 + pkg/offering/base/peer/override/pvc_test.go | 99 + pkg/offering/base/peer/override/service.go | 53 + .../base/peer/override/service_test.go | 65 + .../base/peer/override/serviceaccount.go | 58 + .../base/peer/override/serviceaccount_test.go | 80 + pkg/offering/base/peer/override/statedbpvc.go | 80 + pkg/offering/base/peer/peer.go | 1586 +++++++++++ pkg/offering/base/peer/peer_suite_test.go | 46 + pkg/offering/base/peer/peer_test.go | 936 +++++++ pkg/offering/common/backupcrypto.go | 367 +++ pkg/offering/common/common_suite_test.go | 31 + pkg/offering/common/common_test.go | 346 +++ pkg/offering/common/override.go | 108 + .../common/reconcilechecks/fabricversion.go | 137 + .../reconcilechecks/fabricversion_test.go | 200 ++ .../reconcilechecks/images/fabricversion.go | 80 + .../images/fabricversion_test.go | 293 +++ .../common/reconcilechecks/images/images.go | 214 ++ .../images/images_suite_test.go | 31 + .../reconcilechecks/images/images_test.go | 571 ++++ .../images/mocks/fabricversion.go | 102 + .../reconcilechecks/images/mocks/instance.go | 336 +++ .../reconcilechecks/images/mocks/update.go | 167 ++ .../common/reconcilechecks/mocks/image.go | 186 ++ .../common/reconcilechecks/mocks/instance.go | 336 +++ .../common/reconcilechecks/mocks/update.go | 167 ++ .../common/reconcilechecks/mocks/version.go | 186 ++ .../reconcilechecks_suite_test.go | 
31 + pkg/offering/common/result.go | 30 + pkg/offering/common/secret.go | 374 +++ pkg/offering/k8s/ca/ca.go | 227 ++ pkg/offering/k8s/ca/ca_suite_test.go | 31 + pkg/offering/k8s/ca/ca_test.go | 264 ++ pkg/offering/k8s/ca/override/ingress.go | 117 + pkg/offering/k8s/ca/override/ingress_test.go | 115 + .../k8s/ca/override/ingressv1beta1.go | 107 + .../k8s/ca/override/ingressv1beta1_test.go | 105 + pkg/offering/k8s/ca/override/override.go | 27 + .../k8s/ca/override/override_suite_test.go | 31 + pkg/offering/k8s/ca/override/override_test.go | 145 + pkg/offering/k8s/console/console.go | 200 ++ .../k8s/console/console_suite_test.go | 31 + pkg/offering/k8s/console/console_test.go | 286 ++ .../k8s/console/override/consolecm.go | 78 + .../k8s/console/override/consolecm_test.go | 161 ++ .../k8s/console/override/deployercm.go | 79 + .../k8s/console/override/deployercm_test.go | 285 ++ pkg/offering/k8s/console/override/envcm.go | 49 + .../k8s/console/override/envcm_test.go | 87 + pkg/offering/k8s/console/override/ingress.go | 91 + .../k8s/console/override/ingress_test.go | 93 + .../k8s/console/override/ingressv1beta1.go | 86 + .../console/override/ingressv1beta1_test.go | 88 + pkg/offering/k8s/console/override/override.go | 27 + .../console/override/override_suite_test.go | 31 + .../k8s/console/override/override_test.go | 131 + pkg/offering/k8s/orderer/node.go | 266 ++ pkg/offering/k8s/orderer/orderer.go | 173 ++ .../k8s/orderer/orderer_suite_test.go | 31 + pkg/offering/k8s/orderer/orderer_test.go | 86 + pkg/offering/k8s/orderer/override/ingress.go | 177 ++ .../k8s/orderer/override/ingress_test.go | 140 + .../k8s/orderer/override/ingressv1beta1.go | 127 + .../orderer/override/ingressv1beta1_test.go | 125 + pkg/offering/k8s/orderer/override/override.go | 27 + .../orderer/override/override_suite_test.go | 31 + .../k8s/orderer/override/override_test.go | 161 ++ pkg/offering/k8s/peer/override/ingress.go | 141 + .../k8s/peer/override/ingress_test.go | 193 ++ .../k8s/peer/override/ingressv1beta1.go | 126 + .../k8s/peer/override/ingressv1beta1_test.go | 178 ++ pkg/offering/k8s/peer/override/override.go | 29 + .../k8s/peer/override/override_suite_test.go | 31 + pkg/offering/k8s/peer/peer.go | 292 +++ pkg/offering/k8s/peer/peer_suite_test.go | 31 + pkg/offering/k8s/peer/peer_test.go | 254 ++ pkg/offering/offering.go | 46 + pkg/offering/offering_suite_test.go | 31 + pkg/offering/offering_test.go | 55 + pkg/offering/openshift/ca/ca.go | 202 ++ pkg/offering/openshift/ca/ca_suite_test.go | 31 + pkg/offering/openshift/ca/ca_test.go | 267 ++ pkg/offering/openshift/ca/override/caroute.go | 67 + .../openshift/ca/override/operationroute.go | 67 + .../openshift/ca/override/override.go | 27 + .../ca/override/override_suite_test.go | 31 + .../openshift/ca/override/override_test.go | 87 + pkg/offering/openshift/console/console.go | 177 ++ .../openshift/console/console_suite_test.go | 31 + .../openshift/console/console_test.go | 138 + .../openshift/console/override/consolecm.go | 78 + .../console/override/consolecm_test.go | 161 ++ .../console/override/consoleroute.go | 67 + .../console/override/consoleroute_test.go | 70 + .../openshift/console/override/deployercm.go | 79 + .../console/override/deployercm_test.go | 254 ++ .../openshift/console/override/envcm.go | 52 + .../openshift/console/override/envcm_test.go | 95 + .../openshift/console/override/override.go | 27 + .../console/override/override_suite_test.go | 31 + .../openshift/console/override/proxyroute.go | 67 + .../console/override/proxyroute_test.go | 70 + 
pkg/offering/openshift/orderer/node.go | 259 ++ pkg/offering/openshift/orderer/orderer.go | 156 ++ .../openshift/orderer/orderer_suite_test.go | 31 + .../openshift/orderer/orderer_test.go | 88 + .../openshift/orderer/override/adminroute.go | 72 + .../openshift/orderer/override/grpcroute.go | 67 + .../orderer/override/operationroute.go | 67 + .../orderer/override/ordererroute.go | 67 + .../openshift/orderer/override/override.go | 27 + .../orderer/override/override_suite_test.go | 31 + .../orderer/override/override_test.go | 104 + .../openshift/peer/override/grpcroute.go | 67 + .../openshift/peer/override/operationroute.go | 67 + .../openshift/peer/override/override.go | 27 + .../peer/override/override_suite_test.go | 31 + .../openshift/peer/override/override_test.go | 104 + .../openshift/peer/override/peerroute.go | 67 + pkg/offering/openshift/peer/peer.go | 315 +++ .../openshift/peer/peer_suite_test.go | 31 + pkg/offering/openshift/peer/peer_test.go | 221 ++ pkg/operatorerrors/errors.go | 153 ++ pkg/operatorerrors/errors_test.go | 54 + .../operatorerrors_suite_test.go | 31 + pkg/restart/configmap/configmap_suite_test.go | 31 + pkg/restart/configmap/configmap_test.go | 107 + pkg/restart/configmap/manager.go | 94 + pkg/restart/restart.go | 360 +++ pkg/restart/restart_structs.go | 54 + pkg/restart/restart_suite_test.go | 31 + pkg/restart/restart_test.go | 380 +++ .../staggerrestarts/staggerrestarts.go | 408 +++ .../staggerrestarts_structs.go | 65 + .../staggerrestarts_suite_test.go | 31 + .../staggerrestarts/staggerrestarts_test.go | 389 +++ pkg/util/image/image.go | 62 + pkg/util/merge/merge.go | 61 + pkg/util/merge/merge_suite_test.go | 31 + pkg/util/merge/merge_test.go | 216 ++ pkg/util/pointer/pointer.go | 28 + pkg/util/testdata/invalid_kind.yaml | 1 + pkg/util/util.go | 940 +++++++ pkg/util/util_suite_test.go | 46 + pkg/util/util_test.go | 399 +++ sample-network/.gitignore | 4 + sample-network/README.md | 207 ++ sample-network/config/cas/kustomization.yaml | 25 + sample-network/config/cas/org0-ca.yaml | 135 + sample-network/config/cas/org1-ca.yaml | 128 + sample-network/config/cas/org2-ca.yaml | 130 + sample-network/config/configtx-template.yaml | 428 +++ .../console/hlf-operations-console.yaml | 80 + .../config/console/kustomization.yaml | 5 + sample-network/config/core.yaml | 775 ++++++ .../config/manager/hlf-operator-manager.yaml | 123 + .../config/manager/kustomization.yaml | 22 + .../config/orderers/kustomization.yaml | 24 + .../config/orderers/org0-orderers.yaml | 151 ++ .../config/peers/kustomization.yaml | 27 + sample-network/config/peers/org1-peer1.yaml | 102 + sample-network/config/peers/org1-peer2.yaml | 102 + sample-network/config/peers/org2-peer1.yaml | 102 + sample-network/config/peers/org2-peer2.yaml | 102 + .../config/rbac/hlf-operator-clusterrole.yaml | 205 ++ .../rbac/hlf-operator-clusterrolebinding.yaml | 36 + .../config/rbac/hlf-operator-rolebinding.yaml | 19 + .../rbac/hlf-operator-serviceaccount.yaml | 22 + sample-network/config/rbac/hlf-psp.yaml | 48 + sample-network/config/rbac/kustomization.yaml | 26 + sample-network/network | 194 ++ sample-network/scripts/chaincode.sh | 383 +++ sample-network/scripts/channel.sh | 289 ++ sample-network/scripts/cluster.sh | 156 ++ sample-network/scripts/console.sh | 49 + sample-network/scripts/kind.sh | 124 + sample-network/scripts/prereqs.sh | 73 + sample-network/scripts/test_network.sh | 153 ++ sample-network/scripts/utils.sh | 156 ++ scripts/check-license.sh | 140 + scripts/checks.sh | 44 + scripts/download_binaries.sh | 25 + 
scripts/go-sec.sh | 27 + scripts/install-tools.sh | 41 + scripts/run-unit-tests.sh | 39 + testdata/deploy/ca/adminsecret.yaml | 26 + testdata/deploy/ca/tlssecret.yaml | 27 + testdata/deploy/console/secret.yaml | 25 + testdata/deploy/console/tlssecret.yaml | 26 + .../deploy/console/ui-password-secret.yaml | 25 + testdata/deploy/operator.yaml | 145 + testdata/deploy/operatorhsm.yaml | 150 ++ testdata/deploy/orderer/secret.yaml | 24 + testdata/deploy/peer/secret.yaml | 27 + testdata/deploy/role.yaml | 192 ++ testdata/deploy/role_binding.yaml | 30 + testdata/deploy/role_ocp.yaml | 152 ++ testdata/deploy/service_account.yaml | 22 + testdata/deployercm/deployer-configmap.yaml | 185 ++ testdata/init/ca/cert.pem | 21 + testdata/init/ca/key.pem | 28 + testdata/init/ca/override.yaml | 47 + testdata/init/orderer/configtx.yaml | 275 ++ testdata/init/orderer/msp/cacerts/cert.pem | 21 + testdata/init/orderer/orderer.yaml | 401 +++ testdata/init/peer/core.yaml | 708 +++++ testdata/init/peer/core_bootstrap_test.yaml | 706 +++++ testdata/init/peer/core_invalid.yaml | 708 +++++ testdata/init/peer/tls-cert.pem | 16 + testdata/init/peer/tls-key.pem | 5 + testdata/migration/secret.json | 30 + testdata/msp/keystore/key.pem | 28 + testdata/operatorconfig.yaml | 5 + testdata/secret.yaml | 2 + testdata/tls/tls.crt | 21 + testdata/tls/tls.key | 28 + tools/tools.go | 30 + version/fabricversion.go | 100 + version/version.go | 252 ++ version/version_suite_test.go | 31 + version/version_test.go | 167 ++ 807 files changed, 163902 insertions(+) create mode 100644 .github/workflows/image-build-pr.yaml create mode 100644 .github/workflows/image-build.yaml create mode 100644 .github/workflows/integration-tests.yaml create mode 100644 .github/workflows/unit-tests.yaml create mode 100644 .gitignore create mode 100644 Dockerfile create mode 100644 LICENSE create mode 100644 Makefile create mode 100644 PROJECT create mode 100644 README.md create mode 100644 api/addtoscheme_ibp_v1beta1.go create mode 100644 api/apis.go create mode 100644 api/v1beta1/common.go create mode 100644 api/v1beta1/common_struct.go create mode 100644 api/v1beta1/groupversion_info.go create mode 100644 api/v1beta1/ibpca.go create mode 100644 api/v1beta1/ibpca_types.go create mode 100644 api/v1beta1/ibpconsole.go create mode 100644 api/v1beta1/ibpconsole_types.go create mode 100644 api/v1beta1/ibporderer.go create mode 100644 api/v1beta1/ibporderer_types.go create mode 100644 api/v1beta1/ibppeer.go create mode 100644 api/v1beta1/ibppeer_types.go create mode 100644 api/v1beta1/zz_generated.deepcopy.go create mode 100644 boilerplate/boilerplate.go.txt create mode 100644 boilerplate/boilerplate.sh.txt create mode 100755 build/entrypoint create mode 100755 build/user_setup create mode 100644 bundle.Dockerfile create mode 100644 bundle/manifests/fabric-opensource-operator.clusterserviceversion.yaml create mode 100644 bundle/manifests/ibp.com_ibpcas.yaml create mode 100644 bundle/manifests/ibp.com_ibpconsoles.yaml create mode 100644 bundle/manifests/ibp.com_ibporderers.yaml create mode 100644 bundle/manifests/ibp.com_ibppeers.yaml create mode 100644 bundle/manifests/operator-controller-manager-metrics-service_v1_service.yaml create mode 100644 bundle/manifests/operator-ibm-hlfsupport_rbac.authorization.k8s.io_v1_clusterrolebinding.yaml create mode 100644 bundle/manifests/operator-leader-election-role_rbac.authorization.k8s.io_v1_role.yaml create mode 100644 bundle/manifests/operator-leader-election-rolebinding_rbac.authorization.k8s.io_v1_rolebinding.yaml create 
mode 100644 bundle/manifests/operator-manager-role_rbac.authorization.k8s.io_v1_clusterrole.yaml create mode 100644 bundle/manifests/operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml create mode 100644 bundle/manifests/operator-metrics-reader_rbac.authorization.k8s.io_v1beta1_clusterrole.yaml create mode 100644 bundle/manifests/operator-operator_rbac.authorization.k8s.io_v1_clusterrolebinding.yaml create mode 100644 bundle/manifests/operator-proxy-role_rbac.authorization.k8s.io_v1_clusterrole.yaml create mode 100644 bundle/manifests/operator-proxy-rolebinding_rbac.authorization.k8s.io_v1_clusterrolebinding.yaml create mode 100644 bundle/metadata/annotations.yaml create mode 100644 cmd/crd/main.go create mode 100644 config/certmanager/certificate.yaml create mode 100644 config/certmanager/kustomization.yaml create mode 100644 config/certmanager/kustomizeconfig.yaml create mode 100644 config/crd/bases/ibp.com_ibpcas.yaml create mode 100644 config/crd/bases/ibp.com_ibpconsoles.yaml create mode 100644 config/crd/bases/ibp.com_ibporderers.yaml create mode 100644 config/crd/bases/ibp.com_ibppeers.yaml create mode 100644 config/crd/kustomization.yaml create mode 100644 config/crd/kustomizeconfig.yaml create mode 100644 config/crd/patches/cainjection_in_ibpcas.yaml create mode 100644 config/crd/patches/cainjection_in_ibpconsoles.yaml create mode 100644 config/crd/patches/cainjection_in_ibporderers.yaml create mode 100644 config/crd/patches/cainjection_in_ibppeers.yaml create mode 100644 config/crd/patches/webhook_in_ibpcas.yaml create mode 100644 config/crd/patches/webhook_in_ibpconsoles.yaml create mode 100644 config/crd/patches/webhook_in_ibporderers.yaml create mode 100644 config/crd/patches/webhook_in_ibppeers.yaml create mode 100644 config/default/kustomization.yaml create mode 100644 config/default/manager_auth_proxy_patch.yaml create mode 100644 config/default/manager_webhook_patch.yaml create mode 100644 config/default/webhookcainjection_patch.yaml create mode 100644 config/ingress/k3s/ingress-nginx-controller.yaml create mode 100644 config/ingress/k3s/kustomization.yaml create mode 100644 config/ingress/kind/ingress-nginx-controller.yaml create mode 100644 config/ingress/kind/kustomization.yaml create mode 100644 config/ingress/kustomization.yaml create mode 100644 config/manager/kustomization.yaml create mode 100644 config/manager/manager.yaml create mode 100644 config/manifests/bases/fabric-opensource-operator.clusterserviceversion.yaml create mode 100644 config/manifests/kustomization.yaml create mode 100644 config/prometheus/kustomization.yaml create mode 100644 config/prometheus/monitor.yaml create mode 100644 config/rbac/auth_proxy_client_clusterrole.yaml create mode 100644 config/rbac/auth_proxy_role.yaml create mode 100644 config/rbac/auth_proxy_role_binding.yaml create mode 100644 config/rbac/auth_proxy_service.yaml create mode 100644 config/rbac/ibpca_editor_role.yaml create mode 100644 config/rbac/ibpca_viewer_role.yaml create mode 100644 config/rbac/ibpconsole_editor_role.yaml create mode 100644 config/rbac/ibpconsole_viewer_role.yaml create mode 100644 config/rbac/ibporderer_editor_role.yaml create mode 100644 config/rbac/ibporderer_viewer_role.yaml create mode 100644 config/rbac/ibppeer_editor_role.yaml create mode 100644 config/rbac/ibppeer_viewer_role.yaml create mode 100644 config/rbac/kustomization.yaml create mode 100644 config/rbac/leader_election_role.yaml create mode 100644 config/rbac/leader_election_role_binding.yaml create mode 100644 
config/rbac/role.yaml create mode 100644 config/rbac/role_binding.yaml create mode 100644 config/rbac/service_account.yaml create mode 100644 config/samples/ibp.com_v1beta1_ibpca.yaml create mode 100644 config/samples/ibp.com_v1beta1_ibpconsole.yaml create mode 100644 config/samples/ibp.com_v1beta1_ibporderer.yaml create mode 100644 config/samples/ibp.com_v1beta1_ibppeer.yaml create mode 100644 config/samples/kustomization.yaml create mode 100644 config/scorecard/.osdk-scorecard.yaml create mode 100644 config/scorecard/kustomization.yaml create mode 100644 config/webhook/kustomization.yaml create mode 100644 config/webhook/kustomizeconfig.yaml create mode 100644 config/webhook/service.yaml create mode 100644 controllers/add_ibpca.go create mode 100644 controllers/add_ibpconsole.go create mode 100644 controllers/add_ibporderer.go create mode 100644 controllers/add_ibppeer.go create mode 100644 controllers/common/common.go create mode 100644 controllers/controller.go create mode 100644 controllers/ibpca/ibpca_controller.go create mode 100644 controllers/ibpca/ibpca_controller_test.go create mode 100644 controllers/ibpca/ibpca_suite_test.go create mode 100644 controllers/ibpca/mocks/careconcile.go create mode 100644 controllers/ibpca/predicate.go create mode 100644 controllers/ibpca/predicate_test.go create mode 100644 controllers/ibpconsole/ibpconsole_controller.go create mode 100644 controllers/ibpconsole/ibpconsole_controller_test.go create mode 100644 controllers/ibpconsole/ibpconsole_suite_test.go create mode 100644 controllers/ibpconsole/mocks/consolereconcile.go create mode 100644 controllers/ibporderer/ibporderer_controller.go create mode 100644 controllers/ibporderer/ibporderer_controller_test.go create mode 100644 controllers/ibporderer/ibporderer_suite_test.go create mode 100644 controllers/ibporderer/mocks/ordererreconcile.go create mode 100644 controllers/ibporderer/predicate.go create mode 100644 controllers/ibporderer/predicate_test.go create mode 100644 controllers/ibppeer/ibppeer_controller.go create mode 100644 controllers/ibppeer/ibppeer_controller_test.go create mode 100644 controllers/ibppeer/ibppeer_suite_test.go create mode 100644 controllers/ibppeer/mocks/peerreconcile.go create mode 100644 controllers/ibppeer/predicate.go create mode 100644 controllers/mocks/client.go create mode 100644 controllers/suite_test.go create mode 100644 defaultconfig/ca/ca.yaml create mode 100644 defaultconfig/ca/tlsca.yaml create mode 100644 defaultconfig/console/console.go create mode 100644 defaultconfig/orderer/configtx.yaml create mode 100644 defaultconfig/orderer/orderer.yaml create mode 100644 defaultconfig/orderer/ouconfig-inter.yaml create mode 100644 defaultconfig/orderer/ouconfig.yaml create mode 100644 defaultconfig/orderer/v2/orderer.yaml create mode 100644 defaultconfig/orderer/v24/orderer.yaml create mode 100644 defaultconfig/peer/core.yaml create mode 100644 defaultconfig/peer/ouconfig-inter.yaml create mode 100644 defaultconfig/peer/ouconfig.yaml create mode 100644 defaultconfig/peer/v2/core.yaml create mode 100644 definitions/ca/deployment.yaml create mode 100644 definitions/ca/ingress.yaml create mode 100644 definitions/ca/ingressv1beta1.yaml create mode 100644 definitions/ca/pvc.yaml create mode 100644 definitions/ca/role.yaml create mode 100644 definitions/ca/rolebinding.yaml create mode 100644 definitions/ca/route.yaml create mode 100644 definitions/ca/service.yaml create mode 100644 definitions/ca/serviceaccount.yaml create mode 100644 
definitions/console/configmap.yaml create mode 100644 definitions/console/console-configmap.yaml create mode 100644 definitions/console/deployer-configmap.yaml create mode 100644 definitions/console/deployer-service.yaml create mode 100644 definitions/console/deployment.yaml create mode 100644 definitions/console/ingress.yaml create mode 100644 definitions/console/ingressv1beta1.yaml create mode 100644 definitions/console/networkpolicy-denyall.yaml create mode 100644 definitions/console/networkpolicy-ingress.yaml create mode 100644 definitions/console/pvc.yaml create mode 100644 definitions/console/role.yaml create mode 100644 definitions/console/rolebinding.yaml create mode 100644 definitions/console/route.yaml create mode 100644 definitions/console/service.yaml create mode 100644 definitions/console/serviceaccount.yaml create mode 100644 definitions/orderer/configmap.yaml create mode 100644 definitions/orderer/deployment.yaml create mode 100644 definitions/orderer/ingress.yaml create mode 100644 definitions/orderer/ingressv1beta1.yaml create mode 100644 definitions/orderer/orderernode.yaml create mode 100644 definitions/orderer/pvc.yaml create mode 100644 definitions/orderer/role.yaml create mode 100644 definitions/orderer/rolebinding.yaml create mode 100644 definitions/orderer/route.yaml create mode 100644 definitions/orderer/saas-ingress-community.yaml create mode 100644 definitions/orderer/saas-ingress.yaml create mode 100644 definitions/orderer/saas-ingressv1beta1-community.yaml create mode 100644 definitions/orderer/saas-ingressv1beta1.yaml create mode 100644 definitions/orderer/service.yaml create mode 100644 definitions/orderer/serviceaccount.yaml create mode 100644 definitions/peer/chaincode-launcher.yaml create mode 100644 definitions/peer/couchdb-init.yaml create mode 100644 definitions/peer/couchdb-pvc.yaml create mode 100644 definitions/peer/couchdb.yaml create mode 100644 definitions/peer/deployment.yaml create mode 100644 definitions/peer/fluentd-configmap.yaml create mode 100644 definitions/peer/ingress.yaml create mode 100644 definitions/peer/ingressv1beta1.yaml create mode 100644 definitions/peer/pvc.yaml create mode 100644 definitions/peer/role.yaml create mode 100644 definitions/peer/rolebinding.yaml create mode 100644 definitions/peer/route.yaml create mode 100644 definitions/peer/saas-ingress-community.yaml create mode 100644 definitions/peer/saas-ingress.yaml create mode 100644 definitions/peer/saas-ingressv1beta1-community.yaml create mode 100644 definitions/peer/saas-ingressv1beta1.yaml create mode 100644 definitions/peer/service.yaml create mode 100644 definitions/peer/serviceaccount.yaml create mode 100755 docker-entrypoint.sh create mode 100644 docs/CONTRIBUTING.md create mode 100644 docs/DEVELOPING.md create mode 100644 docs/images/fabric-operator-components.png create mode 100644 docs/images/fabric-operator-sample-network.png create mode 100644 go.mod create mode 100644 go.sum create mode 100644 integration/actions/ca/ca_suite_test.go create mode 100644 integration/actions/ca/ca_test.go create mode 100644 integration/actions/orderer/orderer_suite_test.go create mode 100644 integration/actions/orderer/orderer_test.go create mode 100644 integration/actions/peer/peer_suite_test.go create mode 100644 integration/actions/peer/peer_test.go create mode 100644 integration/actions/peer/reenroll_test.go create mode 100644 integration/autorenew/autorenew_suite_test.go create mode 100644 integration/autorenew/autorenew_test.go create mode 100644 
integration/ca/ca_suite_test.go create mode 100644 integration/ca/ca_test.go create mode 100644 integration/cclauncher/cclauncher_suite_test.go create mode 100644 integration/cclauncher/cclauncher_test.go create mode 100644 integration/console/console_suite_test.go create mode 100644 integration/console/console_test.go create mode 100644 integration/e2ev2/.gitignore create mode 100644 integration/e2ev2/ca_test.go create mode 100644 integration/e2ev2/config.yaml create mode 100644 integration/e2ev2/console_test.go create mode 100644 integration/e2ev2/e2ev2_suite_test.go create mode 100644 integration/e2ev2/e2ev2_test.go create mode 100644 integration/e2ev2/orderer_test.go create mode 100644 integration/e2ev2/peer_test.go create mode 100644 integration/helper/ca.go create mode 100644 integration/helper/crspecs.go create mode 100644 integration/helper/job.go create mode 100644 integration/helper/orderer.go create mode 100644 integration/helper/peer.go create mode 100644 integration/helper/session.go create mode 100644 integration/images.go create mode 100644 integration/init/init_suite_test.go create mode 100644 integration/init/init_test.go create mode 100644 integration/init/orderer_test.go create mode 100644 integration/init/peer_test.go create mode 100644 integration/integration.go create mode 100644 integration/kind-config.yaml create mode 100644 integration/migration/fabric/fabric_suite_test.go create mode 100644 integration/migration/fabric/orderer_test.go create mode 100644 integration/migration/fabric/peer_test.go create mode 100644 integration/migration/migration_suite_test.go create mode 100644 integration/migration/migration_test.go create mode 100644 integration/nativeresourcepoller.go create mode 100644 integration/nginx-deployment.yaml create mode 100644 integration/operator.go create mode 100644 integration/operatorrestart/operatorrestart_suite_test.go create mode 100644 integration/operatorrestart/operatorrestart_test.go create mode 100644 integration/orderer/orderer_suite_test.go create mode 100644 integration/orderer/orderer_test.go create mode 100644 integration/peer/peer_suite_test.go create mode 100644 integration/peer/peer_test.go create mode 100644 integration/restartmgr/restartmgr_suite_test.go create mode 100644 integration/restartmgr/restartmgr_test.go create mode 100644 main.go create mode 100644 operatorconfig/config.go create mode 100644 operatorconfig/operator.go create mode 100644 operatorconfig/versions.go create mode 100644 pkg/action/action.go create mode 100644 pkg/action/action_suite_test.go create mode 100644 pkg/action/action_test.go create mode 100644 pkg/action/enroll.go create mode 100644 pkg/action/enroll_test.go create mode 100644 pkg/action/mocks/deploymentreset.go create mode 100644 pkg/action/mocks/enrollinstance.go create mode 100644 pkg/action/mocks/reenroller.go create mode 100644 pkg/action/mocks/reenrollinstance.go create mode 100644 pkg/action/mocks/upgradeinstance.go create mode 100644 pkg/action/upgradedbs.go create mode 100644 pkg/action/upgradedbs_test.go create mode 100644 pkg/apis/ca/v1/ca.go create mode 100644 pkg/apis/ca/v1/functions.go create mode 100644 pkg/apis/common/common.go create mode 100644 pkg/apis/console/v1/console.go create mode 100644 pkg/apis/console/v1/zz_generated.deepcopy.go create mode 100644 pkg/apis/deployer/deployer.go create mode 100644 pkg/apis/orderer/v1/orderer.go create mode 100644 pkg/apis/orderer/v2/orderer.go create mode 100644 pkg/apis/orderer/v24/orderer.go create mode 100644 pkg/apis/peer/v1/peer.go 
create mode 100644 pkg/apis/peer/v2/peer.go create mode 100644 pkg/certificate/certificate.go create mode 100644 pkg/certificate/certificate_suite_test.go create mode 100644 pkg/certificate/certificate_test.go create mode 100644 pkg/certificate/mocks/reenroller.go create mode 100644 pkg/certificate/reenroller/client.go create mode 100644 pkg/certificate/reenroller/client_pkcs11.go create mode 100644 pkg/certificate/reenroller/hsmdaemonreenroller.go create mode 100644 pkg/certificate/reenroller/hsmreenroller.go create mode 100644 pkg/certificate/reenroller/mocks/identity.go create mode 100644 pkg/certificate/reenroller/reenroller.go create mode 100644 pkg/certificate/reenroller/reenroller_suite_test.go create mode 100644 pkg/certificate/reenroller/reenroller_test.go create mode 100644 pkg/client/client.go create mode 100644 pkg/client/client_suite_test.go create mode 100644 pkg/client/client_test.go create mode 100644 pkg/command/command_suite_test.go create mode 100644 pkg/command/crdinstall.go create mode 100644 pkg/command/mocks/reader.go create mode 100644 pkg/command/operator.go create mode 100644 pkg/command/operator_test.go create mode 100644 pkg/controller/mocks/client.go create mode 100644 pkg/crd/crd_suite_test.go create mode 100644 pkg/crd/manager.go create mode 100644 pkg/crd/manager_test.go create mode 100644 pkg/crd/mocks/client.go create mode 100644 pkg/global/config.go create mode 100644 pkg/global/config_test.go create mode 100644 pkg/global/global_suite_test.go create mode 100644 pkg/initializer/ca/bccsp/config.go create mode 100644 pkg/initializer/ca/bccsp/configpkcs11.go create mode 100644 pkg/initializer/ca/ca.go create mode 100644 pkg/initializer/ca/ca_suite_test.go create mode 100644 pkg/initializer/ca/ca_test.go create mode 100644 pkg/initializer/ca/config/ca.go create mode 100644 pkg/initializer/ca/config/ca_test.go create mode 100644 pkg/initializer/ca/config/config.go create mode 100644 pkg/initializer/ca/config/config_suite_test.go create mode 100644 pkg/initializer/ca/config/config_test.go create mode 100644 pkg/initializer/ca/config/db.go create mode 100644 pkg/initializer/ca/config/db_test.go create mode 100644 pkg/initializer/ca/config/intermediate.go create mode 100644 pkg/initializer/ca/config/intermediate_test.go create mode 100644 pkg/initializer/ca/config/operations.go create mode 100644 pkg/initializer/ca/config/operations_test.go create mode 100644 pkg/initializer/ca/config/tls.go create mode 100644 pkg/initializer/ca/config/tls_test.go create mode 100644 pkg/initializer/ca/hsm.go create mode 100644 pkg/initializer/ca/hsm_test.go create mode 100644 pkg/initializer/ca/hsmdaemon.go create mode 100644 pkg/initializer/ca/initializer.go create mode 100644 pkg/initializer/ca/initializer_test.go create mode 100644 pkg/initializer/ca/mocks/client.go create mode 100644 pkg/initializer/ca/mocks/config.go create mode 100644 pkg/initializer/ca/mocks/ibpca.go create mode 100644 pkg/initializer/ca/sw.go create mode 100644 pkg/initializer/ca/tls/tls.go create mode 100644 pkg/initializer/ca/tls/tls_suite_test.go create mode 100644 pkg/initializer/ca/tls/tls_test.go create mode 100644 pkg/initializer/common/common.go create mode 100644 pkg/initializer/common/common_suite_test.go create mode 100644 pkg/initializer/common/common_test.go create mode 100644 pkg/initializer/common/config/config_suite_test.go create mode 100644 pkg/initializer/common/config/config_test.go create mode 100644 pkg/initializer/common/config/crypto.go create mode 100644 
pkg/initializer/common/config/hsmconfig.go create mode 100644 pkg/initializer/common/config/hsmconfig_test.go create mode 100644 pkg/initializer/common/config/hsmdaemon.go create mode 100644 pkg/initializer/common/config/mocks/crypto.go create mode 100644 pkg/initializer/common/config/nodeou.go create mode 100644 pkg/initializer/common/enroller/client.go create mode 100644 pkg/initializer/common/enroller/client_pkcs11.go create mode 100644 pkg/initializer/common/enroller/enroller.go create mode 100644 pkg/initializer/common/enroller/enroller_suite_test.go create mode 100644 pkg/initializer/common/enroller/enroller_test.go create mode 100644 pkg/initializer/common/enroller/fabcaclient.go create mode 100644 pkg/initializer/common/enroller/fabcaclient_test.go create mode 100644 pkg/initializer/common/enroller/factory.go create mode 100644 pkg/initializer/common/enroller/factory_test.go create mode 100644 pkg/initializer/common/enroller/hsmdaemonenroller.go create mode 100644 pkg/initializer/common/enroller/hsmdaemonenroller_test.go create mode 100644 pkg/initializer/common/enroller/hsmenroller.go create mode 100644 pkg/initializer/common/enroller/hsmenroller_test.go create mode 100644 pkg/initializer/common/enroller/hsmproxyenroller.go create mode 100644 pkg/initializer/common/enroller/mocks/caclient.go create mode 100644 pkg/initializer/common/enroller/mocks/client.go create mode 100644 pkg/initializer/common/enroller/mocks/cryptoenroller.go create mode 100644 pkg/initializer/common/enroller/mocks/cryptoinstance.go create mode 100644 pkg/initializer/common/enroller/mocks/hsmcaclient.go create mode 100644 pkg/initializer/common/enroller/mocks/instance.go create mode 100644 pkg/initializer/common/enroller/swenroller.go create mode 100644 pkg/initializer/common/enroller/swenroller_test.go create mode 100644 pkg/initializer/common/mocks/cryptovalidator.go create mode 100644 pkg/initializer/common/mspparser/mspparser.go create mode 100644 pkg/initializer/common/mspparser/mspparser_suite_test.go create mode 100644 pkg/initializer/common/mspparser/mspparser_test.go create mode 100644 pkg/initializer/common/secretmanager/secretmanager.go create mode 100644 pkg/initializer/common/secretmanager/secretmanager_suite_test.go create mode 100644 pkg/initializer/common/secretmanager/secretmanager_test.go create mode 100644 pkg/initializer/cryptogen/bccsp.go create mode 100644 pkg/initializer/cryptogen/mocks/config.go create mode 100644 pkg/initializer/cryptogen/mocks/instance.go create mode 100644 pkg/initializer/orderer/config/v1/config_suite_test.go create mode 100644 pkg/initializer/orderer/config/v1/config_test.go create mode 100644 pkg/initializer/orderer/config/v1/io.go create mode 100644 pkg/initializer/orderer/config/v1/orderer.go create mode 100644 pkg/initializer/orderer/config/v2/config_suite_test.go create mode 100644 pkg/initializer/orderer/config/v2/config_test.go create mode 100644 pkg/initializer/orderer/config/v2/io.go create mode 100644 pkg/initializer/orderer/config/v2/orderer.go create mode 100644 pkg/initializer/orderer/config/v24/config_suite_test.go create mode 100644 pkg/initializer/orderer/config/v24/config_test.go create mode 100644 pkg/initializer/orderer/config/v24/io.go create mode 100644 pkg/initializer/orderer/config/v24/orderer.go create mode 100644 pkg/initializer/orderer/configtx/config.go create mode 100644 pkg/initializer/orderer/configtx/configtx.go create mode 100644 pkg/initializer/orderer/configtx/configtx_suite_test.go create mode 100644 
pkg/initializer/orderer/configtx/configtx_test.go create mode 100644 pkg/initializer/orderer/configtx/encoder.go create mode 100644 pkg/initializer/orderer/configtx/profile.go create mode 100644 pkg/initializer/orderer/configtx/profile_test.go create mode 100644 pkg/initializer/orderer/initializer.go create mode 100644 pkg/initializer/orderer/initializer_test.go create mode 100644 pkg/initializer/orderer/mocks/ibporderer.go create mode 100644 pkg/initializer/orderer/orderer.go create mode 100644 pkg/initializer/orderer/orderer_suite_test.go create mode 100644 pkg/initializer/peer/config/commoncore/commoncore_suite_test.go create mode 100644 pkg/initializer/peer/config/commoncore/commoncore_test.go create mode 100644 pkg/initializer/peer/config/commoncore/core.go create mode 100644 pkg/initializer/peer/config/commoncore/testdata/test_core.yaml create mode 100644 pkg/initializer/peer/config/commoncore/testdata/test_core_no_change.yaml create mode 100644 pkg/initializer/peer/config/commoncore/testdata/test_core_no_peer.yaml create mode 100644 pkg/initializer/peer/config/v1/config.go create mode 100644 pkg/initializer/peer/config/v1/config_suite_test.go create mode 100644 pkg/initializer/peer/config/v1/config_test.go create mode 100644 pkg/initializer/peer/config/v1/deliveryclient.go create mode 100644 pkg/initializer/peer/config/v1/io.go create mode 100644 pkg/initializer/peer/config/v2/config.go create mode 100644 pkg/initializer/peer/config/v2/config_test.go create mode 100644 pkg/initializer/peer/config/v2/v2_suite_test.go create mode 100644 pkg/initializer/peer/coreconfigmap.go create mode 100644 pkg/initializer/peer/coreconfigmap_test.go create mode 100644 pkg/initializer/peer/initializer.go create mode 100644 pkg/initializer/peer/initializer_test.go create mode 100644 pkg/initializer/peer/mocks/client.go create mode 100644 pkg/initializer/peer/mocks/ibppeer.go create mode 100644 pkg/initializer/peer/peer.go create mode 100644 pkg/initializer/peer/peer_suite_test.go create mode 100644 pkg/initializer/peer/peer_test.go create mode 100644 pkg/initializer/validator/validator.go create mode 100644 pkg/initializer/validator/validator_suite_test.go create mode 100644 pkg/initializer/validator/validator_test.go create mode 100644 pkg/k8s/clientset/client.go create mode 100644 pkg/k8s/controllerclient/client.go create mode 100644 pkg/k8s/controllerclient/client_structs.go create mode 100644 pkg/manager/resources/configmap/configmap_suite_test.go create mode 100644 pkg/manager/resources/configmap/manager.go create mode 100644 pkg/manager/resources/configmap/manager_test.go create mode 100644 pkg/manager/resources/container/container.go create mode 100644 pkg/manager/resources/container/container_suite_test.go create mode 100644 pkg/manager/resources/container/container_test.go create mode 100644 pkg/manager/resources/deployment/deployment.go create mode 100644 pkg/manager/resources/deployment/deployment_suite_test.go create mode 100644 pkg/manager/resources/deployment/manager.go create mode 100644 pkg/manager/resources/deployment/manager_test.go create mode 100644 pkg/manager/resources/ingress/ingress_suite_test.go create mode 100644 pkg/manager/resources/ingress/manager.go create mode 100644 pkg/manager/resources/ingress/manager_test.go create mode 100644 pkg/manager/resources/ingressv1beta1/ingress_suite_test.go create mode 100644 pkg/manager/resources/ingressv1beta1/manager.go create mode 100644 pkg/manager/resources/ingressv1beta1/manager_test.go create mode 100644 
pkg/manager/resources/job/job.go create mode 100644 pkg/manager/resources/job/job_suite_test.go create mode 100644 pkg/manager/resources/job/job_test.go create mode 100644 pkg/manager/resources/job/mocks/client.go create mode 100644 pkg/manager/resources/manager/manager.go create mode 100644 pkg/manager/resources/mocks/resource_manager.go create mode 100644 pkg/manager/resources/orderernode/manager.go create mode 100644 pkg/manager/resources/orderernode/manager_test.go create mode 100644 pkg/manager/resources/orderernode/orderernode_suite_test.go create mode 100644 pkg/manager/resources/pv/manager.go create mode 100644 pkg/manager/resources/pv/manager_test.go create mode 100644 pkg/manager/resources/pv/pvc_suite_test.go create mode 100644 pkg/manager/resources/pvc/manager.go create mode 100644 pkg/manager/resources/pvc/manager_test.go create mode 100644 pkg/manager/resources/pvc/pvc_suite_test.go create mode 100644 pkg/manager/resources/resources.go create mode 100644 pkg/manager/resources/role/manager.go create mode 100644 pkg/manager/resources/role/manager_test.go create mode 100644 pkg/manager/resources/role/role_suite_test.go create mode 100644 pkg/manager/resources/rolebinding/manager.go create mode 100644 pkg/manager/resources/rolebinding/manager_test.go create mode 100644 pkg/manager/resources/rolebinding/rolebinding_suite_test.go create mode 100644 pkg/manager/resources/route/manager.go create mode 100644 pkg/manager/resources/route/manager_test.go create mode 100644 pkg/manager/resources/route/route_suite_test.go create mode 100644 pkg/manager/resources/service/manager.go create mode 100644 pkg/manager/resources/service/manager_test.go create mode 100644 pkg/manager/resources/service/service_suite_test.go create mode 100644 pkg/manager/resources/serviceaccount/manager.go create mode 100644 pkg/manager/resources/serviceaccount/manager_test.go create mode 100644 pkg/manager/resources/serviceaccount/serviceaccount_suite_test.go create mode 100644 pkg/migrator/initsecret/migrator.go create mode 100644 pkg/migrator/migrator.go create mode 100644 pkg/migrator/peer/fabric/fabric_suite_test.go create mode 100644 pkg/migrator/peer/fabric/migrator.go create mode 100644 pkg/migrator/peer/fabric/migrator_test.go create mode 100644 pkg/migrator/peer/fabric/mocks/migrator.go create mode 100644 pkg/migrator/peer/fabric/v2/mocks/configmapmanager.go create mode 100644 pkg/migrator/peer/fabric/v2/mocks/deploymentmanager.go create mode 100644 pkg/migrator/peer/fabric/v2/peer.go create mode 100644 pkg/migrator/peer/fabric/v2/peer_test.go create mode 100644 pkg/migrator/peer/fabric/v2/v2_suite_test.go create mode 100644 pkg/migrator/peer/peer_suite_test.go create mode 100644 pkg/offering/base/ca/ca.go create mode 100644 pkg/offering/base/ca/ca_suite_test.go create mode 100644 pkg/offering/base/ca/ca_test.go create mode 100644 pkg/offering/base/ca/initialize.go create mode 100644 pkg/offering/base/ca/initialize_test.go create mode 100644 pkg/offering/base/ca/mocks/certificate_manager.go create mode 100644 pkg/offering/base/ca/mocks/initialize.go create mode 100644 pkg/offering/base/ca/mocks/initializer.go create mode 100644 pkg/offering/base/ca/mocks/restart_manager.go create mode 100644 pkg/offering/base/ca/mocks/update.go create mode 100644 pkg/offering/base/ca/override/deployment.go create mode 100644 pkg/offering/base/ca/override/deployment_test.go create mode 100644 pkg/offering/base/ca/override/override.go create mode 100644 pkg/offering/base/ca/override/override_suite_test.go create mode 100644 
pkg/offering/base/ca/override/override_test.go create mode 100644 pkg/offering/base/ca/override/overridecm.go create mode 100644 pkg/offering/base/ca/override/pvc.go create mode 100644 pkg/offering/base/ca/override/pvc_test.go create mode 100644 pkg/offering/base/ca/override/role.go create mode 100644 pkg/offering/base/ca/override/rolebinding.go create mode 100644 pkg/offering/base/ca/override/service.go create mode 100644 pkg/offering/base/ca/override/service_test.go create mode 100644 pkg/offering/base/ca/override/serviceaccount.go create mode 100644 pkg/offering/base/ca/override/serviceaccount_test.go create mode 100644 pkg/offering/base/console/console.go create mode 100644 pkg/offering/base/console/console_suite_test.go create mode 100644 pkg/offering/base/console/console_test.go create mode 100644 pkg/offering/base/console/mocks/restart_manager.go create mode 100644 pkg/offering/base/console/mocks/update.go create mode 100644 pkg/offering/base/console/override/consolecm.go create mode 100644 pkg/offering/base/console/override/consolecm_test.go create mode 100644 pkg/offering/base/console/override/deployercm.go create mode 100644 pkg/offering/base/console/override/deployercm_test.go create mode 100644 pkg/offering/base/console/override/deployerservice.go create mode 100644 pkg/offering/base/console/override/deployerservice_test.go create mode 100644 pkg/offering/base/console/override/deployment.go create mode 100644 pkg/offering/base/console/override/deployment_test.go create mode 100644 pkg/offering/base/console/override/envcm.go create mode 100644 pkg/offering/base/console/override/envcm_test.go create mode 100644 pkg/offering/base/console/override/override.go create mode 100644 pkg/offering/base/console/override/override_suite_test.go create mode 100644 pkg/offering/base/console/override/pvc.go create mode 100644 pkg/offering/base/console/override/pvc_test.go create mode 100644 pkg/offering/base/console/override/service.go create mode 100644 pkg/offering/base/console/override/service_test.go create mode 100644 pkg/offering/base/console/override/serviceaccount.go create mode 100644 pkg/offering/base/console/override/serviceaccount_test.go create mode 100644 pkg/offering/base/orderer/mocks/certificate_manager.go create mode 100644 pkg/offering/base/orderer/mocks/deployment_manager.go create mode 100644 pkg/offering/base/orderer/mocks/initializeibporderer.go create mode 100644 pkg/offering/base/orderer/mocks/node_manager.go create mode 100644 pkg/offering/base/orderer/mocks/restart_manager.go create mode 100644 pkg/offering/base/orderer/mocks/update.go create mode 100644 pkg/offering/base/orderer/node.go create mode 100644 pkg/offering/base/orderer/node_test.go create mode 100644 pkg/offering/base/orderer/orderer.go create mode 100644 pkg/offering/base/orderer/orderer_suite_test.go create mode 100644 pkg/offering/base/orderer/orderer_test.go create mode 100644 pkg/offering/base/orderer/override/deployment.go create mode 100644 pkg/offering/base/orderer/override/deployment_test.go create mode 100644 pkg/offering/base/orderer/override/envcm.go create mode 100644 pkg/offering/base/orderer/override/orderernode.go create mode 100644 pkg/offering/base/orderer/override/override.go create mode 100644 pkg/offering/base/orderer/override/override_suite_test.go create mode 100644 pkg/offering/base/orderer/override/override_test.go create mode 100644 pkg/offering/base/orderer/override/pvc.go create mode 100644 pkg/offering/base/orderer/override/pvc_test.go create mode 100644 
pkg/offering/base/orderer/override/service.go create mode 100644 pkg/offering/base/orderer/override/service_test.go create mode 100644 pkg/offering/base/orderer/override/serviceaccount.go create mode 100644 pkg/offering/base/orderer/override/serviceaccount_test.go create mode 100644 pkg/offering/base/peer/mocks/certificate_manager.go create mode 100644 pkg/offering/base/peer/mocks/deployment_manager.go create mode 100644 pkg/offering/base/peer/mocks/initializer.go create mode 100644 pkg/offering/base/peer/mocks/restart_manager.go create mode 100644 pkg/offering/base/peer/mocks/update.go create mode 100644 pkg/offering/base/peer/override/deployment.go create mode 100644 pkg/offering/base/peer/override/deployment_test.go create mode 100644 pkg/offering/base/peer/override/override.go create mode 100644 pkg/offering/base/peer/override/override_suite_test.go create mode 100644 pkg/offering/base/peer/override/override_test.go create mode 100644 pkg/offering/base/peer/override/pvc.go create mode 100644 pkg/offering/base/peer/override/pvc_test.go create mode 100644 pkg/offering/base/peer/override/service.go create mode 100644 pkg/offering/base/peer/override/service_test.go create mode 100644 pkg/offering/base/peer/override/serviceaccount.go create mode 100644 pkg/offering/base/peer/override/serviceaccount_test.go create mode 100644 pkg/offering/base/peer/override/statedbpvc.go create mode 100644 pkg/offering/base/peer/peer.go create mode 100644 pkg/offering/base/peer/peer_suite_test.go create mode 100644 pkg/offering/base/peer/peer_test.go create mode 100644 pkg/offering/common/backupcrypto.go create mode 100644 pkg/offering/common/common_suite_test.go create mode 100644 pkg/offering/common/common_test.go create mode 100644 pkg/offering/common/override.go create mode 100644 pkg/offering/common/reconcilechecks/fabricversion.go create mode 100644 pkg/offering/common/reconcilechecks/fabricversion_test.go create mode 100644 pkg/offering/common/reconcilechecks/images/fabricversion.go create mode 100644 pkg/offering/common/reconcilechecks/images/fabricversion_test.go create mode 100644 pkg/offering/common/reconcilechecks/images/images.go create mode 100644 pkg/offering/common/reconcilechecks/images/images_suite_test.go create mode 100644 pkg/offering/common/reconcilechecks/images/images_test.go create mode 100644 pkg/offering/common/reconcilechecks/images/mocks/fabricversion.go create mode 100644 pkg/offering/common/reconcilechecks/images/mocks/instance.go create mode 100644 pkg/offering/common/reconcilechecks/images/mocks/update.go create mode 100644 pkg/offering/common/reconcilechecks/mocks/image.go create mode 100644 pkg/offering/common/reconcilechecks/mocks/instance.go create mode 100644 pkg/offering/common/reconcilechecks/mocks/update.go create mode 100644 pkg/offering/common/reconcilechecks/mocks/version.go create mode 100644 pkg/offering/common/reconcilechecks/reconcilechecks_suite_test.go create mode 100644 pkg/offering/common/result.go create mode 100644 pkg/offering/common/secret.go create mode 100644 pkg/offering/k8s/ca/ca.go create mode 100644 pkg/offering/k8s/ca/ca_suite_test.go create mode 100644 pkg/offering/k8s/ca/ca_test.go create mode 100644 pkg/offering/k8s/ca/override/ingress.go create mode 100644 pkg/offering/k8s/ca/override/ingress_test.go create mode 100644 pkg/offering/k8s/ca/override/ingressv1beta1.go create mode 100644 pkg/offering/k8s/ca/override/ingressv1beta1_test.go create mode 100644 pkg/offering/k8s/ca/override/override.go create mode 100644 
pkg/offering/k8s/ca/override/override_suite_test.go create mode 100644 pkg/offering/k8s/ca/override/override_test.go create mode 100644 pkg/offering/k8s/console/console.go create mode 100644 pkg/offering/k8s/console/console_suite_test.go create mode 100644 pkg/offering/k8s/console/console_test.go create mode 100644 pkg/offering/k8s/console/override/consolecm.go create mode 100644 pkg/offering/k8s/console/override/consolecm_test.go create mode 100644 pkg/offering/k8s/console/override/deployercm.go create mode 100644 pkg/offering/k8s/console/override/deployercm_test.go create mode 100644 pkg/offering/k8s/console/override/envcm.go create mode 100644 pkg/offering/k8s/console/override/envcm_test.go create mode 100644 pkg/offering/k8s/console/override/ingress.go create mode 100644 pkg/offering/k8s/console/override/ingress_test.go create mode 100644 pkg/offering/k8s/console/override/ingressv1beta1.go create mode 100644 pkg/offering/k8s/console/override/ingressv1beta1_test.go create mode 100644 pkg/offering/k8s/console/override/override.go create mode 100644 pkg/offering/k8s/console/override/override_suite_test.go create mode 100644 pkg/offering/k8s/console/override/override_test.go create mode 100644 pkg/offering/k8s/orderer/node.go create mode 100644 pkg/offering/k8s/orderer/orderer.go create mode 100644 pkg/offering/k8s/orderer/orderer_suite_test.go create mode 100644 pkg/offering/k8s/orderer/orderer_test.go create mode 100644 pkg/offering/k8s/orderer/override/ingress.go create mode 100644 pkg/offering/k8s/orderer/override/ingress_test.go create mode 100644 pkg/offering/k8s/orderer/override/ingressv1beta1.go create mode 100644 pkg/offering/k8s/orderer/override/ingressv1beta1_test.go create mode 100644 pkg/offering/k8s/orderer/override/override.go create mode 100644 pkg/offering/k8s/orderer/override/override_suite_test.go create mode 100644 pkg/offering/k8s/orderer/override/override_test.go create mode 100644 pkg/offering/k8s/peer/override/ingress.go create mode 100644 pkg/offering/k8s/peer/override/ingress_test.go create mode 100644 pkg/offering/k8s/peer/override/ingressv1beta1.go create mode 100644 pkg/offering/k8s/peer/override/ingressv1beta1_test.go create mode 100644 pkg/offering/k8s/peer/override/override.go create mode 100644 pkg/offering/k8s/peer/override/override_suite_test.go create mode 100644 pkg/offering/k8s/peer/peer.go create mode 100644 pkg/offering/k8s/peer/peer_suite_test.go create mode 100644 pkg/offering/k8s/peer/peer_test.go create mode 100644 pkg/offering/offering.go create mode 100644 pkg/offering/offering_suite_test.go create mode 100644 pkg/offering/offering_test.go create mode 100644 pkg/offering/openshift/ca/ca.go create mode 100644 pkg/offering/openshift/ca/ca_suite_test.go create mode 100644 pkg/offering/openshift/ca/ca_test.go create mode 100644 pkg/offering/openshift/ca/override/caroute.go create mode 100644 pkg/offering/openshift/ca/override/operationroute.go create mode 100644 pkg/offering/openshift/ca/override/override.go create mode 100644 pkg/offering/openshift/ca/override/override_suite_test.go create mode 100644 pkg/offering/openshift/ca/override/override_test.go create mode 100644 pkg/offering/openshift/console/console.go create mode 100644 pkg/offering/openshift/console/console_suite_test.go create mode 100644 pkg/offering/openshift/console/console_test.go create mode 100644 pkg/offering/openshift/console/override/consolecm.go create mode 100644 pkg/offering/openshift/console/override/consolecm_test.go create mode 100644 
pkg/offering/openshift/console/override/consoleroute.go create mode 100644 pkg/offering/openshift/console/override/consoleroute_test.go create mode 100644 pkg/offering/openshift/console/override/deployercm.go create mode 100644 pkg/offering/openshift/console/override/deployercm_test.go create mode 100644 pkg/offering/openshift/console/override/envcm.go create mode 100644 pkg/offering/openshift/console/override/envcm_test.go create mode 100644 pkg/offering/openshift/console/override/override.go create mode 100644 pkg/offering/openshift/console/override/override_suite_test.go create mode 100644 pkg/offering/openshift/console/override/proxyroute.go create mode 100644 pkg/offering/openshift/console/override/proxyroute_test.go create mode 100644 pkg/offering/openshift/orderer/node.go create mode 100644 pkg/offering/openshift/orderer/orderer.go create mode 100644 pkg/offering/openshift/orderer/orderer_suite_test.go create mode 100644 pkg/offering/openshift/orderer/orderer_test.go create mode 100644 pkg/offering/openshift/orderer/override/adminroute.go create mode 100644 pkg/offering/openshift/orderer/override/grpcroute.go create mode 100644 pkg/offering/openshift/orderer/override/operationroute.go create mode 100644 pkg/offering/openshift/orderer/override/ordererroute.go create mode 100644 pkg/offering/openshift/orderer/override/override.go create mode 100644 pkg/offering/openshift/orderer/override/override_suite_test.go create mode 100644 pkg/offering/openshift/orderer/override/override_test.go create mode 100644 pkg/offering/openshift/peer/override/grpcroute.go create mode 100644 pkg/offering/openshift/peer/override/operationroute.go create mode 100644 pkg/offering/openshift/peer/override/override.go create mode 100644 pkg/offering/openshift/peer/override/override_suite_test.go create mode 100644 pkg/offering/openshift/peer/override/override_test.go create mode 100644 pkg/offering/openshift/peer/override/peerroute.go create mode 100644 pkg/offering/openshift/peer/peer.go create mode 100644 pkg/offering/openshift/peer/peer_suite_test.go create mode 100644 pkg/offering/openshift/peer/peer_test.go create mode 100644 pkg/operatorerrors/errors.go create mode 100644 pkg/operatorerrors/errors_test.go create mode 100644 pkg/operatorerrors/operatorerrors_suite_test.go create mode 100644 pkg/restart/configmap/configmap_suite_test.go create mode 100644 pkg/restart/configmap/configmap_test.go create mode 100644 pkg/restart/configmap/manager.go create mode 100644 pkg/restart/restart.go create mode 100644 pkg/restart/restart_structs.go create mode 100644 pkg/restart/restart_suite_test.go create mode 100644 pkg/restart/restart_test.go create mode 100644 pkg/restart/staggerrestarts/staggerrestarts.go create mode 100644 pkg/restart/staggerrestarts/staggerrestarts_structs.go create mode 100644 pkg/restart/staggerrestarts/staggerrestarts_suite_test.go create mode 100644 pkg/restart/staggerrestarts/staggerrestarts_test.go create mode 100644 pkg/util/image/image.go create mode 100644 pkg/util/merge/merge.go create mode 100644 pkg/util/merge/merge_suite_test.go create mode 100644 pkg/util/merge/merge_test.go create mode 100644 pkg/util/pointer/pointer.go create mode 100644 pkg/util/testdata/invalid_kind.yaml create mode 100644 pkg/util/util.go create mode 100644 pkg/util/util_suite_test.go create mode 100644 pkg/util/util_test.go create mode 100644 sample-network/.gitignore create mode 100644 sample-network/README.md create mode 100644 sample-network/config/cas/kustomization.yaml create mode 100644 
sample-network/config/cas/org0-ca.yaml create mode 100644 sample-network/config/cas/org1-ca.yaml create mode 100644 sample-network/config/cas/org2-ca.yaml create mode 100644 sample-network/config/configtx-template.yaml create mode 100644 sample-network/config/console/hlf-operations-console.yaml create mode 100644 sample-network/config/console/kustomization.yaml create mode 100644 sample-network/config/core.yaml create mode 100644 sample-network/config/manager/hlf-operator-manager.yaml create mode 100644 sample-network/config/manager/kustomization.yaml create mode 100644 sample-network/config/orderers/kustomization.yaml create mode 100644 sample-network/config/orderers/org0-orderers.yaml create mode 100644 sample-network/config/peers/kustomization.yaml create mode 100644 sample-network/config/peers/org1-peer1.yaml create mode 100644 sample-network/config/peers/org1-peer2.yaml create mode 100644 sample-network/config/peers/org2-peer1.yaml create mode 100644 sample-network/config/peers/org2-peer2.yaml create mode 100644 sample-network/config/rbac/hlf-operator-clusterrole.yaml create mode 100644 sample-network/config/rbac/hlf-operator-clusterrolebinding.yaml create mode 100644 sample-network/config/rbac/hlf-operator-rolebinding.yaml create mode 100644 sample-network/config/rbac/hlf-operator-serviceaccount.yaml create mode 100644 sample-network/config/rbac/hlf-psp.yaml create mode 100644 sample-network/config/rbac/kustomization.yaml create mode 100755 sample-network/network create mode 100755 sample-network/scripts/chaincode.sh create mode 100644 sample-network/scripts/channel.sh create mode 100644 sample-network/scripts/cluster.sh create mode 100644 sample-network/scripts/console.sh create mode 100644 sample-network/scripts/kind.sh create mode 100644 sample-network/scripts/prereqs.sh create mode 100644 sample-network/scripts/test_network.sh create mode 100644 sample-network/scripts/utils.sh create mode 100755 scripts/check-license.sh create mode 100755 scripts/checks.sh create mode 100755 scripts/download_binaries.sh create mode 100755 scripts/go-sec.sh create mode 100755 scripts/install-tools.sh create mode 100755 scripts/run-unit-tests.sh create mode 100644 testdata/deploy/ca/adminsecret.yaml create mode 100644 testdata/deploy/ca/tlssecret.yaml create mode 100644 testdata/deploy/console/secret.yaml create mode 100644 testdata/deploy/console/tlssecret.yaml create mode 100644 testdata/deploy/console/ui-password-secret.yaml create mode 100644 testdata/deploy/operator.yaml create mode 100644 testdata/deploy/operatorhsm.yaml create mode 100644 testdata/deploy/orderer/secret.yaml create mode 100644 testdata/deploy/peer/secret.yaml create mode 100644 testdata/deploy/role.yaml create mode 100644 testdata/deploy/role_binding.yaml create mode 100644 testdata/deploy/role_ocp.yaml create mode 100644 testdata/deploy/service_account.yaml create mode 100644 testdata/deployercm/deployer-configmap.yaml create mode 100644 testdata/init/ca/cert.pem create mode 100644 testdata/init/ca/key.pem create mode 100644 testdata/init/ca/override.yaml create mode 100644 testdata/init/orderer/configtx.yaml create mode 100644 testdata/init/orderer/msp/cacerts/cert.pem create mode 100644 testdata/init/orderer/orderer.yaml create mode 100644 testdata/init/peer/core.yaml create mode 100644 testdata/init/peer/core_bootstrap_test.yaml create mode 100644 testdata/init/peer/core_invalid.yaml create mode 100644 testdata/init/peer/tls-cert.pem create mode 100644 testdata/init/peer/tls-key.pem create mode 100644 
testdata/migration/secret.json create mode 100644 testdata/msp/keystore/key.pem create mode 100644 testdata/operatorconfig.yaml create mode 100644 testdata/secret.yaml create mode 100644 testdata/tls/tls.crt create mode 100644 testdata/tls/tls.key create mode 100644 tools/tools.go create mode 100644 version/fabricversion.go create mode 100644 version/version.go create mode 100644 version/version_suite_test.go create mode 100644 version/version_test.go diff --git a/.github/workflows/image-build-pr.yaml b/.github/workflows/image-build-pr.yaml new file mode 100644 index 00000000..fa162491 --- /dev/null +++ b/.github/workflows/image-build-pr.yaml @@ -0,0 +1,16 @@ +name: Build Operator image + +on: + pull_request: + branches: [main] + +jobs: + image: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - name: Build + run: | + scripts/install-tools.sh + make image diff --git a/.github/workflows/image-build.yaml b/.github/workflows/image-build.yaml new file mode 100644 index 00000000..28472d69 --- /dev/null +++ b/.github/workflows/image-build.yaml @@ -0,0 +1,20 @@ +name: Build Operator image + +on: + push: + branches: [main] + +jobs: + image: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - name: Build + run: | + scripts/install-tools.sh + make image + - name: Push + run: | + docker login ghcr.io -u $GITHUB_ACTOR -p ${{ secrets.CR_TOKEN }} + make image-push image-push-latest diff --git a/.github/workflows/integration-tests.yaml b/.github/workflows/integration-tests.yaml new file mode 100644 index 00000000..50a712c0 --- /dev/null +++ b/.github/workflows/integration-tests.yaml @@ -0,0 +1,81 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +name: Integration Test + +on: + push: + branches: [main] + pull_request: + branches: [main] + +env: + KUBECONFIG_PATH: /tmp/kubeconfig.yaml + OPERATOR_NAMESPACE: inttest + DOCKERCONFIGJSON: ${{ secrets.DOCKERCONFIGJSON }} + +jobs: + suite: + runs-on: ubuntu-latest + + strategy: + matrix: + suite: + - ca + - peer + - orderer + - console +# - init +# - migration +# - e2ev2 +# - actions/ca +# - actions/orderer +# - actions/peer +# - autorenew +# - cclauncher +# - restartmgr +# - operatorrestart + + steps: + - uses: actions/checkout@v3 + + - name: Set up go + uses: actions/setup-go@v3 + with: + go-version: "1.17.9" + + - name: Set up ginkgo + run: | + go get github.com/onsi/ginkgo/ginkgo + go get github.com/onsi/gomega/... 
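+      # The cluster and test setup below can also be reproduced locally with the
+      # repository's Makefile targets, e.g. for the "ca" suite:
+      #   make kind
+      #   kubectl kustomize config/crd | kubectl apply -f -
+      #   INT_TEST_NAME=ca make integration-tests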
+ + - name: Set up KIND k8s cluster + run: | + make kind + kubectl config view --raw > /tmp/kubeconfig.yaml + + - name: Install Fabric CRDs + run: | + kubectl kustomize config/crd | kubectl apply -f - + + - name: Run ${{ matrix.suite }} integration tests + run: make integration-tests +# run: | +# sleep 360 && kubectl --kubeconfig $KUBECONFIG_PATH describe pods --all-namespaces & +# make integration-tests + env: + INT_TEST_NAME: ${{ matrix.suite }} diff --git a/.github/workflows/unit-tests.yaml b/.github/workflows/unit-tests.yaml new file mode 100644 index 00000000..5c4a5f13 --- /dev/null +++ b/.github/workflows/unit-tests.yaml @@ -0,0 +1,43 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +name: unit-tests + +on: + # TODO: uncomment this when moved to hyperledger-labs repo + # push: + # branches: [main] + pull_request: + branches: [main] + +jobs: + make-checks: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Set up go + uses: actions/setup-go@v3 + with: + go-version: "1.17.9" + - name: license header checks + run: scripts/check-license.sh + # TODO: run in hyperledger-labs + # - name: gosec + # run: scripts/go-sec.sh + - name: run tests + run: make test diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..b596b9d2 --- /dev/null +++ b/.gitignore @@ -0,0 +1,95 @@ +# Temporary Build Files +build/_output +build/_test +# Created by https://www.gitignore.io/api/go,vim,emacs,visualstudiocode +### Emacs ### +# -*- mode: gitignore; -*- +*~ +\#*\# +/.emacs.desktop +/.emacs.desktop.lock +*.elc +auto-save-list +tramp +.\#* +# Org-mode +.org-id-locations +*_archive +# flymake-mode +*_flymake.* +# eshell files +/eshell/history +/eshell/lastdir +# elpa packages +/elpa/ +# reftex files +*.rel +# AUCTeX auto folder +/auto/ +# cask packages +.cask/ +dist/ +# Flycheck +flycheck_*.el +# server auth directory +/server/ +# projectiles files +.projectile +projectile-bookmarks.eld +# directory configuration +.dir-locals.el +# saveplace +places +# url cache +url/cache/ +# cedet +ede-projects.el +# smex +smex-items +# company-statistics +company-statistics-cache.el +# anaconda-mode +anaconda-mode/ +### Go ### +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +# Test binary, build with 'go test -c' +*.test +# Output of the go coverage tool, specifically when used with LiteIDE +*.out +### Vim ### +# swap +.sw[a-p] +.*.sw[a-p] +# session +Session.vim +# temporary +.netrwhist +# auto-generated tag files +tags +### VisualStudioCode ### +.vscode/* +.history +# End of https://www.gitignore.io/api/go,vim,emacs,visualstudiocode +testdata/kubeconfig.yml +testdata/deploy/test_operator.yaml +testdata/deploy/test_role_binding.yaml +testdata/tlsCert.pem +build/_output +bin +vendor +golang_copyright.txt +shell_copyright.txt +kubeconfig.yml +.env +*.bak +temp.* +integration/*/config +integration/actions/*/config 
+scripts/images/clusterserviceversion.yaml +.vscode +.idea/ diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 00000000..4af4582e --- /dev/null +++ b/Dockerfile @@ -0,0 +1,36 @@ +ARG ARCH +ARG REGISTRY +ARG GO_VER + +########## Build operator binary ########## +FROM registry.access.redhat.com/ubi8/go-toolset:$GO_VER as builder +COPY . /go/src/github.com/IBM-Blockchain/fabric-operator +WORKDIR /go/src/github.com/IBM-Blockchain/fabric-operator +RUN GOOS=linux GOARCH=$(go env GOARCH) CGO_ENABLED=1 go build -mod=vendor -tags "pkcs11" -gcflags all=-trimpath=${GOPATH} -asmflags all=-trimpath=${GOPATH} -o /tmp/build/_output/bin/ibp-operator + +########## Final Image ########## +FROM registry.access.redhat.com/ubi8/ubi-minimal + +ENV OPERATOR=/usr/local/bin/ibp-operator + +COPY --from=builder /tmp/build/_output/bin/ibp-operator ${OPERATOR} +COPY build/ /usr/local/bin +COPY definitions /definitions +COPY config/crd/bases /deploy/crds +COPY defaultconfig /defaultconfig +COPY docker-entrypoint.sh . +RUN microdnf update \ + && microdnf install -y \ + shadow-utils \ + iputils \ + && groupadd -g 7051 fabric-user \ + && useradd -u 7051 -g fabric-user -s /bin/bash fabric-user \ + && mkdir /licenses \ + && microdnf remove shadow-utils \ + && microdnf clean all \ + && chown -R fabric-user:fabric-user licenses \ + && /usr/local/bin/user_setup + +USER fabric-user +ENTRYPOINT ["/docker-entrypoint.sh"] +CMD ["/usr/local/bin/entrypoint"] \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..f9beb991 --- /dev/null +++ b/Makefile @@ -0,0 +1,232 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +IMAGE ?= ghcr.io/ibm-blockchain/fabric-operator +TAG ?= $(shell git rev-parse --short HEAD) +ARCH ?= $(shell go env GOARCH) +OSS_GO_VER ?= 1.17.7 +BUILD_DATE = $(shell date -u +"%Y-%m-%dT%H:%M:%SZ") +OS = $(shell go env GOOS) + +DOCKER_IMAGE_REPO ?= ghcr.io + +BUILD_ARGS=--build-arg ARCH=$(ARCH) +BUILD_ARGS+=--build-arg BUILD_ID=$(TAG) +BUILD_ARGS+=--build-arg BUILD_DATE=$(BUILD_DATE) +BUILD_ARGS+=--build-arg GO_VER=$(OSS_GO_VER) + +ifneq ($(origin TRAVIS_PULL_REQUEST),undefined) + ifneq ($(TRAVIS_PULL_REQUEST), false) + TAG=pr-$(TRAVIS_PULL_REQUEST) + endif +endif + +NAMESPACE ?= n$(shell echo $(TAG) | tr -d "-") + +.PHONY: build + +build: ## Builds the starter pack + mkdir -p bin && go build -o bin/operator + +image: setup + docker build --rm . -f Dockerfile $(BUILD_ARGS) -t $(IMAGE):$(TAG)-$(ARCH) + docker tag $(IMAGE):$(TAG)-$(ARCH) $(IMAGE):latest-$(ARCH) + +govendor: + @go mod vendor + +setup: govendor manifests bundle generate + +image-push: + docker push $(IMAGE):$(TAG)-$(ARCH) + +image-push-latest: + docker push $(IMAGE):latest-$(ARCH) + +login: + docker login --username $(DOCKER_USERNAME) --password $(DOCKER_PASSWORD) $(DOCKER_IMAGE_REPO) + +####################################### +#### part of autogenerate makefile #### +####################################### + +# Current Operator version +VERSION ?= "1.0.0" +# Default bundle image tag +BUNDLE_IMG ?= controller-bundle:$(VERSION) +# Options for 'bundle-build' +ifneq ($(origin CHANNELS), undefined) +BUNDLE_CHANNELS := --channels=$(CHANNELS) +endif +ifneq ($(origin DEFAULT_CHANNEL), undefined) +BUNDLE_DEFAULT_CHANNEL := --default-channel=$(DEFAULT_CHANNEL) +endif +BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL) + +# Image URL to use all building/pushing image targets +IMG ?= controller:latest +# Produce CRDs that work back to Kubernetes 1.11 (no version conversion) +CRD_OPTIONS ?= "crd:crdVersions=v1" + +# KIND cluster for local development, integration, and E2E testing +KIND_CLUSTER_NAME ?= fabric +KIND_KUBE_VERSION ?= v1.20.15 # Matches integ IKS cluster rev. v1.23.4 is current +KIND_NODE_IMAGE ?= kindest/node:$(KIND_KUBE_VERSION) + +# Integration test parameters +INT_TEST_TIMEOUT ?= 60m +INT_TEST_NAME ?= * + +# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) +ifeq (,$(shell go env GOBIN)) +GOBIN=$(shell go env GOPATH)/bin +else +GOBIN=$(shell go env GOBIN) +endif + +all: manager + +# Run tests +test: generate fmt vet manifests + @scripts/run-unit-tests.sh + +# Build manager binary +manager: generate fmt vet + go build -o bin/manager main.go + +# Run against the configured Kubernetes cluster in ~/.kube/config +run: generate fmt vet manifests + go run ./main.go + +# Install CRDs into a cluster +install: manifests kustomize + $(KUSTOMIZE) build config/crd | kubectl apply -f - + +# Uninstall CRDs from a cluster +uninstall: manifests kustomize + $(KUSTOMIZE) build config/crd | kubectl delete -f - + +# Deploy controller in the configured Kubernetes cluster in ~/.kube/config +deploy: manifests kustomize + cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} + $(KUSTOMIZE) build config/default | kubectl apply -f - + +# Generate manifests e.g. CRD, RBAC etc. +manifests: controller-gen + $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." 
output:crd:artifacts:config=config/crd/bases + +# Create a KIND K8s and Nginx ingress controller on :80 / :443 +kind: kustomize + kind create cluster --name $(KIND_CLUSTER_NAME) --config=integration/kind-config.yaml --image $(KIND_NODE_IMAGE) + $(KUSTOMIZE) build config/ingress/kind | kubectl apply -f - + +# Destroy the local KIND cluster +unkind: + kind delete cluster --name $(KIND_CLUSTER_NAME) + +# Run integration tests. Target a specific test package by specifying INT_TEST in the make env. +# If INT_TEST is unspecified, run ALL tests (slow!!) +integration-tests: + ginkgo -v -failFast -timeout $(INT_TEST_TIMEOUT) ./integration/$(INT_TEST_NAME) + +# Run go fmt against code +fmt: + go fmt ./... + +# Run go vet against code +vet: + @scripts/checks.sh + +# Generate code +generate: controller-gen + $(CONTROLLER_GEN) object:headerFile="boilerplate/boilerplate.go.txt" paths="./..." + +# Build the docker image +docker-build: test + docker build . -t ${IMG} + +# Push the docker image +docker-push: + docker push ${IMG} + +# find or download controller-gen +# download controller-gen if necessary +controller-gen: +ifeq (, $(shell which controller-gen)) + @{ \ + set -e ;\ + CONTROLLER_GEN_TMP_DIR=$$(mktemp -d) ;\ + cd $$CONTROLLER_GEN_TMP_DIR ;\ + go mod init tmp ;\ + go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.8.0 ;\ + rm -rf $$CONTROLLER_GEN_TMP_DIR ;\ + } +CONTROLLER_GEN=$(GOBIN)/controller-gen +else +CONTROLLER_GEN=$(shell which controller-gen) +endif + +kustomize: +ifeq (, $(shell which kustomize)) + @{ \ + set -e ;\ + KUSTOMIZE_GEN_TMP_DIR=$$(mktemp -d) ;\ + cd $$KUSTOMIZE_GEN_TMP_DIR ;\ + go mod init tmp ;\ + go install sigs.k8s.io/kustomize/kustomize/v3@v3.5.4 ;\ + rm -rf $$KUSTOMIZE_GEN_TMP_DIR ;\ + } +KUSTOMIZE=$(GOBIN)/kustomize +else +KUSTOMIZE=$(shell which kustomize) +endif + +# Generate bundle manifests and metadata, then validate generated files. +bundle: manifests + operator-sdk generate kustomize manifests -q + kustomize build config/manifests | operator-sdk generate bundle -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS) + operator-sdk bundle validate ./bundle + +# Build the bundle image. +bundle-build: + docker build -f bundle.Dockerfile -t $(BUNDLE_IMG) . + +.PHONY: opm +OPM = ./bin/opm +opm: +ifeq (,$(wildcard $(OPM))) +ifeq (,$(shell which opm 2>/dev/null)) + @{ \ + set -e ;\ + mkdir -p $(dir $(OPM)) ;\ + curl -sSLo $(OPM) https://github.com/operator-framework/operator-registry/releases/download/v1.15.1/$(OS)-$(ARCH)-opm ;\ + chmod +x $(OPM) ;\ + } +else +OPM = $(shell which opm) +endif +endif +BUNDLE_IMGS ?= $(BUNDLE_IMG) +CATALOG_IMG ?= $(IMAGE_TAG_BASE)-catalog:v$(VERSION) ifneq ($(origin CATALOG_BASE_IMG), undefined) FROM_INDEX_OPT := --from-index $(CATALOG_BASE_IMG) endif +.PHONY: catalog-build +catalog-build: opm + $(OPM) index add --container-tool docker --mode semver --tag $(CATALOG_IMG) --bundles $(BUNDLE_IMGS) $(FROM_INDEX_OPT) + +.PHONY: catalog-push +catalog-push: ## Push the catalog image. 
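+# catalog-push re-uses the generic docker-push target with IMG pointed at the
+# catalog image; for example (tag and registry are illustrative):
+#   make catalog-build catalog-push CATALOG_IMG=ghcr.io/example/fabric-operator-catalog:v1.0.0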
+	$(MAKE) docker-push IMG=$(CATALOG_IMG)
diff --git a/PROJECT b/PROJECT
new file mode 100644
index 00000000..514977f1
--- /dev/null
+++ b/PROJECT
@@ -0,0 +1,33 @@
+domain: ibp.com
+layout: go.kubebuilder.io/v3
+repo: github.com/IBM-Blockchain/fabric-operator
+projectName: fabric-opensource-operator
+resources:
+  - controller: true
+    domain: ibp.com
+    group: ibp
+    kind: IBPCA
+    path: github.com/IBM-Blockchain/fabric-operator/api/v1beta1
+    version: v1beta1
+  - controller: true
+    domain: ibp.com
+    group: ibp
+    kind: IBPPeer
+    path: github.com/IBM-Blockchain/fabric-operator/api/v1beta1
+    version: v1beta1
+  - controller: true
+    domain: ibp.com
+    group: ibp
+    kind: IBPOrderer
+    path: github.com/IBM-Blockchain/fabric-operator/api/v1beta1
+    version: v1beta1
+  - controller: true
+    domain: ibp.com
+    group: ibp
+    kind: IBPConsole
+    path: github.com/IBM-Blockchain/fabric-operator/api/v1beta1
+    version: v1beta1
+version: "3"
+plugins:
+  manifests.sdk.operatorframework.io/v2: {}
+  scorecard.sdk.operatorframework.io/v2: {}
diff --git a/README.md b/README.md
new file mode 100644
index 00000000..b05a1602
--- /dev/null
+++ b/README.md
@@ -0,0 +1,77 @@
+# fabric-operator
+
+**fabric-operator** is an open-source, cloud-native [Operator](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/)
+for managing Hyperledger Fabric networks on Kubernetes. The operator follows the CNCF [Operator Pattern](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/),
+reducing the minutiae of repetitive, detailed configuration tasks to automated activities performed under
+the guidance of software-based controllers.
+
+Using the operator, a Fabric network is realized in a declarative fashion by applying a series of `CA`, `Peer`,
+`Orderer`, and `Console` resources to the Kubernetes API. In turn, the controller executes a _reconciliation loop_,
+orchestrating containers, storage, and configuration to achieve the desired target state.
+
+To borrow a sailing metaphor, fabric-operator serves as the _eXecutive Officer / XO_ of a vessel. It
+allows you, the captain, to invest valuable time and energy in formulating strategic objectives for a blockchain
+deployment. The operator, or XO, is responsible for enacting the plan and _"making it so."_
+
+_Fabric, Ahoy!_
+
+![Operator Components](docs/images/fabric-operator-components.png)
+
+
+## Feature Benefits
+
+- [x] It slices
+- [x] It dices
+- [ ] It folds your laundry
+- [x] It configures Fabric networks
+- [x] It configures Fabric networks on any Kube (even on your laptop)
+- [x] It configures Fabric networks with K8s APIs (kubectl, kustomize, helm, SDK clients,...)
+- [x] It configures Fabric networks with a web browser
+- [x] It configures Fabric networks with Ansible
+- [x] It configures Fabric networks with native Fabric CLI binaries
+- [x] It configures Fabric networks with CI/CD and git-ops best-practices
+- [x] It deploys _Chaincode Now!!!_ (integrated `ccaas` and `k8s` external builders)
+- [x] It detects expiring and expired x509 certificates
+- [x] It will provide migration and future LTS revision support
+- [x] It manages hybrid cloud, multi-org, and multi-cluster Fabric networks
+- [x] It runs on pure containerd _and_ mobyd (no dependencies on Docker/DIND)
+- [x] It provides wildcard DNS, SNI, and OCP domain ingress routing
+- [x] It is battle tested
+- [x] It is backed by commercial-grade, enterprise support offerings from IBM
+- [x] It ... _just works_. Enjoy!
+
+
+## Future Benefits
+
+- [ ] Declarative Fabric resources : `Channel`, `Chaincode`, `Organization`, `Consortium` / MSP, ...
CRDs +- [ ] Service Mesh Overlay (Linkerd, Istio, ...) with mTLS +- [ ] Metrics and observability with Prometheus and Grafana +- [ ] Operational management: Log aggregation, monitoring, alerting +- [ ] Modular CAs (Fabric CA, cert-manager.io, Vault, letsencrypt, ...) +- [ ] Automatic x509 certificate renewal +- [ ] Backup / Recovery / Upgrade +- [ ] Idemixer, Token SDK, BFT Orderer +- [ ] Layer II blockchain integration (Cactus, Weaver, Token SDK, ...) +- [ ] `kubectl`, `fabctl`, `fabric-cli` command-line extensions. + + +## Build a Fabric Network + +- Build a [sample-network](sample-network) with kubectl on a local [KIND](https://kind.sigs.k8s.io) or [k3s](https://rancherdesktop.io) cluster +- [Build a Network](https://cloud.ibm.com/docs/blockchain?topic=blockchain-ibp-console-build-network) with the [Fabric Operations Console](https://github.com/hyperledger-labs/fabric-operations-console) +- Automate your network with [Ansible Playbooks](https://cloud.ibm.com/docs/blockchain?topic=blockchain-ansible) and the Console REST APIs + + +## Build the Fabric Operator + +- How to [compile](docs/DEVELOPING.md#build-the-operator) the operator +- How to [unit test](docs/DEVELOPING.md#unit-tests) the operator +- How to [launch + debug](docs/DEVELOPING.md#debug-the-operator) the operator +- How to [contribute](docs/CONTRIBUTING.md) to this project. + + +## Community Guidelines + +- This is an open community project. Be KIND to your peers. +- Focus on **outcomes** (_where are we going_), not **mechanics** (_how will we get there_). +- Discussion, Comments, and Action at Hyperledger Discord : [#fabric-kubernetes](https://discord.gg/hyperledger) diff --git a/api/addtoscheme_ibp_v1beta1.go b/api/addtoscheme_ibp_v1beta1.go new file mode 100644 index 00000000..3de11f24 --- /dev/null +++ b/api/addtoscheme_ibp_v1beta1.go @@ -0,0 +1,28 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package apis + +import ( + "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" +) + +func init() { + // Register the types with the Scheme so the components can map objects to GroupVersionKinds and back + AddToSchemes = append(AddToSchemes, v1beta1.SchemeBuilder.AddToScheme) +} diff --git a/api/apis.go b/api/apis.go new file mode 100644 index 00000000..8b086acd --- /dev/null +++ b/api/apis.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package apis + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +// AddToSchemes may be used to add all resources defined in the project to a Scheme +var AddToSchemes runtime.SchemeBuilder + +// AddToScheme adds all Resources to the Scheme +func AddToScheme(s *runtime.Scheme) error { + return AddToSchemes.AddToScheme(s) +} diff --git a/api/v1beta1/common.go b/api/v1beta1/common.go new file mode 100644 index 00000000..9478617d --- /dev/null +++ b/api/v1beta1/common.go @@ -0,0 +1,59 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1beta1 + +import ( + "errors" + "fmt" + + "github.com/IBM-Blockchain/fabric-operator/pkg/util" +) + +// Component is a custom type that enumerates all the components (containers) +type Component string + +const ( + INIT Component = "INIT" + CA Component = "CA" + ORDERER Component = "ORDERER" + PEER Component = "PEER" + GRPCPROXY Component = "GRPCPROXY" + FLUENTD Component = "FLUENTD" + DIND Component = "DIND" + COUCHDB Component = "COUCHDB" + CCLAUNCHER Component = "CCLAUNCHER" + ENROLLER Component = "ENROLLER" + HSMDAEMON Component = "HSMDAEMON" +) + +func (crn *CRN) String() string { + return fmt.Sprintf("crn:%s:%s:%s:%s:%s:%s:%s:%s:%s", + crn.Version, crn.CName, crn.CType, crn.Servicename, crn.Location, crn.AccountID, crn.InstanceID, crn.ResourceType, crn.ResourceID) +} + +func (catls *CATLS) GetBytes() ([]byte, error) { + return util.Base64ToBytes(catls.CACert) +} + +func (e *Enrollment) GetCATLSBytes() ([]byte, error) { + if e.CATLS != nil { + return e.CATLS.GetBytes() + } + return nil, errors.New("no CA TLS certificate set") +} diff --git a/api/v1beta1/common_struct.go b/api/v1beta1/common_struct.go new file mode 100644 index 00000000..adb6bf27 --- /dev/null +++ b/api/v1beta1/common_struct.go @@ -0,0 +1,326 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +var BoolTrue = true +var BoolFalse = false + +// Service is the overrides to be used for Service of the component +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type Service struct { + // The "type" of the service to be used + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Type corev1.ServiceType `json:"type,omitempty"` +} + +// StorageSpec is the overrides to be used for storage of the component +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type StorageSpec struct { + // Size of storage + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Size string `json:"size,omitempty"` + + // Class is the storage class + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Class string `json:"class,omitempty"` +} + +// NetworkInfo is the overrides for the network of the component +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type NetworkInfo struct { + // Domain for the components + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Domain string `json:"domain,omitempty"` + + // ConsolePort is the port to access the console + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + ConsolePort int32 `json:"consolePort,omitempty"` + + // ConfigtxlatorPort is the port to access configtxlator + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + ConfigtxlatorPort int32 `json:"configtxlatorPort,omitempty"` + + // ProxyPort is the port to access console proxy + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + ProxyPort int32 `json:"proxyPort,omitempty"` +} + +// Ingress (Optional) is the list of overrides for ingress of the components +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type Ingress struct { + // TlsSecretName (Optional) is the secret name to be used for tls certificates + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + TlsSecretName string `json:"tlsSecretName,omitempty"` + + // Class (Optional) is the class to set for ingress + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Class string `json:"class,omitempty"` +} + +// IBPCRStatus is the string that defines if status is set by the controller +// +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true +type IBPCRStatus string + +const ( + // True means that the status is set by the controller successfully + // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true + True IBPCRStatus = "True" + + // False stands for the status which is not correctly set and should be ignored + // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true + False IBPCRStatus = "False" + + // Unknown stands for unknown status + // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true + Unknown IBPCRStatus = "Unknown" +) + +// IBPCRStatusType is the string that stores teh status +// +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true +type IBPCRStatusType string + +const ( + // Deploying is the status when component is being deployed + // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true + Deploying IBPCRStatusType = "Deploying" + + // Deployed is the status when the component's deployment is done successfully + // 
+operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true + Deployed IBPCRStatusType = "Deployed" + + // Precreated is the status of the orderers when they are waiting for config block + // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true + Precreated IBPCRStatusType = "Precreated" + + // Error is the status when a component's deployment has failed due to an error + // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true + Error IBPCRStatusType = "Error" + + // Warning is the status when a component is running, but will fail in future + // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true + Warning IBPCRStatusType = "Warning" + + // Initializing is the status when a component is initializing + // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true + Initializing IBPCRStatusType = "Initializing" +) + +// +k8s:deepcopy-gen=true +// CRStatus is the object that defines the status of a CR +// +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true +type CRStatus struct { + // Type is true or false based on if status is valid + // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true + Type IBPCRStatusType `json:"type,omitempty"` + + // Status is defined based on the current status of the component + // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true + Status IBPCRStatus `json:"status,omitempty"` + + // Reason provides a reason for an error + // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true + Reason string `json:"reason,omitempty"` + + // Message provides a message for the status to be shown to customer + // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true + Message string `json:"message,omitempty"` + + // LastHeartbeatTime is when the controller reconciled this component + // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true + LastHeartbeatTime string `json:"lastHeartbeatTime,omitempty"` + + // Version is the product (IBP) version of the component + // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true + Version string `json:"version,omitempty"` + + // ErrorCode is the code of classification of errors + // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true + ErrorCode int `json:"errorcode,omitempty"` + + // Versions is the operand version of the component + // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true + Versions CRStatusVersion `json:"versions,omitempty"` +} + +// CRStatusVersion provides the current reconciled version of the operand +// +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true +type CRStatusVersion struct { + // Reconciled provides the reconciled version of the operand + Reconciled string `json:"reconciled"` +} + +// HSM struct is DEPRECATED +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type HSM struct { + // PKCS11Endpoint is DEPRECATED + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + PKCS11Endpoint string `json:"pkcs11endpoint,omitempty"` +} + +type CRN struct { + Version string `json:"version,omitempty"` + CName string `json:"c_name,omitempty"` + CType string `json:"c_type,omitempty"` + Servicename string `json:"service_name,omitempty"` + Location string `json:"location,omitempty"` + AccountID string `json:"account_id,omitempty"` + InstanceID string `json:"instance_id,omitempty"` + ResourceType string 
`json:"resource_type,omitempty"` + ResourceID string `json:"resource_id,omitempty"` +} + +// License should be accepted to install custom resources +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type License struct { + // Accept should be set to true to accept the license. + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:checkbox" + // +kubebuilder:validation:Enum=true + Accept bool `json:"accept,omitempty"` +} + +// +k8s:deepcopy-gen=true +// SecretSpec defines the crypto spec to pass to components +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type SecretSpec struct { + // Enrollment defines enrollment part of secret spec + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Enrollment *EnrollmentSpec `json:"enrollment,omitempty"` + + // MSP defines msp part of secret spec + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + MSP *MSPSpec `json:"msp,omitempty"` +} + +// CATLS contains the TLS CA certificate of the CA +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type CATLS struct { + // CACert is the base64 encoded certificate + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + CACert string `json:"cacert,omitempty"` +} + +// +k8s:deepcopy-gen=true +// Enrollment is the enrollment section of secret spec +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type Enrollment struct { + // CAHost is host part of the CA to use + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + CAHost string `json:"cahost,omitempty"` + + // CAPort is port of the CA to use + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + CAPort string `json:"caport,omitempty"` + + // CAName is name of CA + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + CAName string `json:"caname,omitempty"` + + // CATLS is tls details to talk to CA endpoint + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + CATLS *CATLS `json:"catls,omitempty"` + + // EnrollID is the enrollment username + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + EnrollID string `json:"enrollid,omitempty"` + + // EnrollSecret is enrollment secret ( password ) + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + EnrollSecret string `json:"enrollsecret,omitempty"` + + // AdminCerts is the base64 encoded admincerts + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + AdminCerts []string `json:"admincerts,omitempty"` + + // CSR is the CSR override object + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + CSR *CSR `json:"csr,omitempty"` +} + +// +k8s:deepcopy-gen=true +// EnrollmentSpec contains all the configurations that a component needs to enroll with +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type EnrollmentSpec struct { + // Component contains ecert enrollment details + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Component *Enrollment `json:"component,omitempty"` + + // TLS contains tls enrollment details + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + TLS *Enrollment `json:"tls,omitempty"` + + // ClientAuth contains client uath enrollment details + // 
+operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + ClientAuth *Enrollment `json:"clientauth,omitempty"` +} + +// +k8s:deepcopy-gen=true +// CSR has the Hosts for the CSR to be sent in the enrollment +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type CSR struct { + // Hosts override for CSR + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Hosts []string `json:"hosts,omitempty"` +} + +// +k8s:deepcopy-gen=true +// MSPSpec contains the configuration for the component to start with all the certificates +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type MSPSpec struct { + // Component contains crypto for ecerts + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Component *MSP `json:"component,omitempty"` + + // TLS contains crypto for tls certs + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + TLS *MSP `json:"tls,omitempty"` + + // ClientAuth contains crypto for client auth certs + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + ClientAuth *MSP `json:"clientauth,omitempty"` +} + +// +k8s:deepcopy-gen=true +// MSP contains the common definitions crypto material for the component +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type MSP struct { + // KeyStore is base64 encoded private key + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + KeyStore string `json:"keystore,omitempty"` + + // SignCerts is base64 encoded sign cert + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + SignCerts string `json:"signcerts,omitempty"` + + // CACerts is base64 encoded cacerts array + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + CACerts []string `json:"cacerts,omitempty"` + + // IntermediateCerts is base64 encoded intermediate certs array + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + IntermediateCerts []string `json:"intermediatecerts,omitempty"` + + // AdminCerts is base64 encoded admincerts array + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + AdminCerts []string `json:"admincerts,omitempty"` +} diff --git a/api/v1beta1/groupversion_info.go b/api/v1beta1/groupversion_info.go new file mode 100644 index 00000000..830c5226 --- /dev/null +++ b/api/v1beta1/groupversion_info.go @@ -0,0 +1,38 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// Package v1beta1 contains API Schema definitions for the ibp v1beta1 API group +// +kubebuilder:object:generate=true +// +groupName=ibp.com +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "ibp.com", Version: "v1beta1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/api/v1beta1/ibpca.go b/api/v1beta1/ibpca.go new file mode 100644 index 00000000..9bddaf48 --- /dev/null +++ b/api/v1beta1/ibpca.go @@ -0,0 +1,274 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1beta1 + +import ( + "os" + + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca/config" + "github.com/IBM-Blockchain/fabric-operator/pkg/util/image" + corev1 "k8s.io/api/core/v1" +) + +// +kubebuilder:object:generate=false +type CAConfig interface { + UsingPKCS11() bool +} + +func (s *IBPCA) ResetRestart() { + s.Spec.Action.Restart = false +} + +func (s *IBPCA) ResetTLSRenew() { + s.Spec.Action.Renew.TLSCert = false +} + +func (s *IBPCA) UsingHSMProxy() bool { + if s.Spec.HSM != nil && s.Spec.HSM.PKCS11Endpoint != "" { + return true + } + return false +} + +func (s *IBPCA) IsHSMEnabled() bool { + return s.isCAHSMEnabled() || s.isTLSCAHSMEnabled() +} + +func (s *IBPCA) IsHSMEnabledForType(caType config.Type) bool { + switch caType { + case config.EnrollmentCA: + return s.isCAHSMEnabled() + case config.TLSCA: + return s.isTLSCAHSMEnabled() + } + return false +} + +func (s *IBPCA) isCAHSMEnabled() bool { + configOverride, err := s.Spec.GetCAConfigOverride() + if err != nil { + return false + } + + return configOverride.UsingPKCS11() +} + +func (s *IBPCA) isTLSCAHSMEnabled() bool { + configOverride, err := s.GetTLSCAConfigOverride() + if err != nil { + return false + } + + return configOverride.UsingPKCS11() +} + +func (s *IBPCA) GetTLSCAConfigOverride() (CAConfig, error) { + if s.Spec.ConfigOverride == nil || s.Spec.ConfigOverride.TLSCA == nil { + return &config.Config{}, nil + } + + configOverride, err := config.ReadFrom(&s.Spec.ConfigOverride.TLSCA.Raw) + if err != nil { + return nil, err + } + + return configOverride, nil +} + +func (s *IBPCA) GetNumSecondsWarningPeriod() int64 { + if s.Spec.NumSecondsWarningPeriod == 0 { + // Default to the equivalent of 30 days + daysToSecondsConversion := int64(24 * 60 * 60) + return 30 * daysToSecondsConversion + } + return s.Spec.NumSecondsWarningPeriod +} + +func (s *IBPCA) GetPullSecrets() []corev1.LocalObjectReference { + pullSecrets := []corev1.LocalObjectReference{} + for _, ps := range s.Spec.ImagePullSecrets { + 
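+ // spec.imagePullSecrets holds bare secret names; each one is wrapped in a corev1.LocalObjectReference so the resulting slice can be used directly as a PodSpec's imagePullSecrets.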
pullSecrets = append(pullSecrets, corev1.LocalObjectReference{Name: ps}) + } + return pullSecrets +} + +func (s *IBPCA) GetRegistryURL() string { + return s.Spec.RegistryURL +} + +func (s *IBPCA) GetArch() []string { + return s.Spec.Arch +} + +func (s *IBPCA) GetLabels() map[string]string { + label := os.Getenv("OPERATOR_LABEL_PREFIX") + if label == "" { + label = "fabric" + } + + return map[string]string{ + "app": s.GetName(), + "creator": label, + "release": "operator", + "helm.sh/chart": "ibm-" + label, + "app.kubernetes.io/name": label, + "app.kubernetes.io/instance": label + "ca", + "app.kubernetes.io/managed-by": label + "-operator", + } +} + +// GetFabricVersion returns fabric version from CR spec +func (s *IBPCA) GetFabricVersion() string { + return s.Spec.FabricVersion +} + +// SetFabricVersion sets fabric version on spec +func (s *IBPCA) SetFabricVersion(version string) { + s.Spec.FabricVersion = version +} + +// ImagesSet returns true if the spec has images defined +func (s *IBPCA) ImagesSet() bool { + return s.Spec.Images != nil +} + +// GetResource returns resources defined in spec for request component, if no resources +// defined returns blank but initialized instance of resources +func (s *IBPCA) GetResource(comp Component) corev1.ResourceRequirements { + if s.Spec.Resources != nil { + switch comp { + case INIT: + if s.Spec.Resources.Init != nil { + return *s.Spec.Resources.Init + } + case CA: + if s.Spec.Resources.CA != nil { + return *s.Spec.Resources.CA + } + case ENROLLER: + if s.Spec.Resources.EnrollJob != nil { + return *s.Spec.Resources.EnrollJob + } + case HSMDAEMON: + if s.Spec.Resources.HSMDaemon != nil { + return *s.Spec.Resources.HSMDaemon + } + } + } + + return corev1.ResourceRequirements{} +} + +// PVCName returns pvc name associated with instance +func (s *IBPCA) PVCName() string { + name := s.Name + "-pvc" + if s.Spec.CustomNames.PVC.CA != "" { + name = s.Spec.CustomNames.PVC.CA + } + return name +} + +// GetMSPID returns empty string as we don't currently store +// the orgname/MSPID of the CA in its spec +func (s *IBPCA) GetMSPID() string { + // no-op + return "" +} + +func (s *IBPCASpec) HSMSet() bool { + if s.HSM != nil && s.HSM.PKCS11Endpoint != "" { + return true + } + + return false +} + +func (s *IBPCASpec) DomainSet() bool { + if s.Domain != "" { + return true + } + + return false +} + +func (s *IBPCASpec) CAResourcesSet() bool { + if s.Resources != nil { + if s.Resources.CA != nil { + return true + } + } + + return false +} + +func (s *IBPCASpec) InitResourcesSet() bool { + if s.Resources != nil { + if s.Resources.Init != nil { + return true + } + } + + return false +} + +func (s *IBPCASpec) GetCAConfigOverride() (CAConfig, error) { + if s.ConfigOverride == nil || s.ConfigOverride.CA == nil { + return &config.Config{}, nil + } + + configOverride, err := config.ReadFrom(&s.ConfigOverride.CA.Raw) + if err != nil { + return nil, err + } + return configOverride, nil +} + +func (c *IBPCAStatus) HasType() bool { + if c.CRStatus.Type != "" { + return true + } + return false +} + +// Override will look at requested images and use those to override default image +// values. Override also format the image tag to include arch for non-sha based +// tags. +func (i *CAImages) Override(requested *CAImages, registryURL string, arch string) { + // If requested is nil, we are only interested in properly prepending registry + // URL to the image and with overriding default values so a empty struct is initialized. 
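+ // As the doc comment above notes, values from `requested` take precedence over the receiver's defaults; the image helper also prepends registryURL to image names and the tag helper appends the arch suffix to non-SHA tags.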
+ if requested == nil { + requested = &CAImages{} + } + + // Images + i.CAInitImage = image.GetImage(registryURL, i.CAInitImage, requested.CAInitImage) + i.CAImage = image.GetImage(registryURL, i.CAImage, requested.CAImage) + i.HSMImage = image.GetImage(registryURL, i.HSMImage, requested.HSMImage) + i.EnrollerImage = image.GetImage(registryURL, i.EnrollerImage, requested.EnrollerImage) + + // Tags + i.CAInitTag = image.GetTag(arch, i.CAInitTag, requested.CAInitTag) + i.CATag = image.GetTag(arch, i.CATag, requested.CATag) + i.HSMTag = image.GetTag(arch, i.HSMTag, requested.HSMTag) + i.EnrollerTag = image.GetTag(arch, i.EnrollerTag, requested.EnrollerTag) +} + +func init() { + SchemeBuilder.Register(&IBPCA{}, &IBPCAList{}) +} diff --git a/api/v1beta1/ibpca_types.go b/api/v1beta1/ibpca_types.go new file mode 100644 index 00000000..36d27a44 --- /dev/null +++ b/api/v1beta1/ibpca_types.go @@ -0,0 +1,330 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
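For orientation only (not part of this patch): once `AddToScheme` has been called, the v1beta1 types below can be consumed with a standard controller-runtime client. A minimal sketch follows; the namespace, CA name, Fabric version, and domain are placeholder values, and it assumes the IBPCA CRD is already installed on the cluster.

```go
package main

import (
	"context"

	ibpv1beta1 "github.com/IBM-Blockchain/fabric-operator/api/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func main() {
	// Build a scheme that knows both the core Kubernetes types and the ibp.com/v1beta1 types.
	scheme := runtime.NewScheme()
	_ = clientgoscheme.AddToScheme(scheme)
	_ = ibpv1beta1.AddToScheme(scheme) // wired up by the SchemeBuilder in groupversion_info.go

	c, err := client.New(ctrl.GetConfigOrDie(), client.Options{Scheme: scheme})
	if err != nil {
		panic(err)
	}

	// Declare a CA; the operator's reconcile loop does the rest.
	ca := &ibpv1beta1.IBPCA{
		ObjectMeta: metav1.ObjectMeta{Name: "org1-ca", Namespace: "test-network"}, // placeholder names
		Spec: ibpv1beta1.IBPCASpec{
			License:       ibpv1beta1.License{Accept: true},
			FabricVersion: "1.5.3",       // placeholder version
			Domain:        "example.com", // placeholder ingress domain
		},
	}

	if err := c.Create(context.TODO(), ca); err != nil {
		panic(err)
	}
}
```

The field values above are illustrative, not recommendations.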
+ +// IBPCASpec defines the desired state of IBP CA +// +k8s:deepcopy-gen=true +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type IBPCASpec struct { + // License should be accepted by the user to be able to setup CA + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + License License `json:"license"` + + /* generic configs - images/resources/storage/servicetype/version/replicas */ + + // Images (Optional) lists the images to be used for CA's deployment + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Images *CAImages `json:"images,omitempty"` + + // RegistryURL is registry url used to pull images + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + RegistryURL string `json:"registryURL,omitempty"` + + // ImagePullSecrets (Optional) is the list of ImagePullSecrets to be used for CA's deployment + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + ImagePullSecrets []string `json:"imagePullSecrets,omitempty"` + + // Replicas (Optional - default 1) is the number of CA replicas to be setup + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Replicas *int32 `json:"replicas,omitempty"` + + // Resources (Optional) is the amount of resources to be provided to CA deployment + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Resources *CAResources `json:"resources,omitempty"` + + // Service (Optional) is the override object for CA's service + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Service *Service `json:"service,omitempty"` + + // Storage (Optional - uses default storageclass if not provided) is the override object for CA's PVC config + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Storage *CAStorages `json:"storage,omitempty"` + + /* CA specific configs */ + + // ConfigOverride (Optional) is the object to provide overrides to CA & TLSCA config + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + ConfigOverride *ConfigOverride `json:"configoverride,omitempty"` + + // HSM (Optional) is DEPRECATED + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + HSM *HSM `json:"hsm,omitempty"` + + // CustomNames (Optional) is to use pre-configured resources for CA's deployment + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + CustomNames CACustomNames `json:"customNames,omitempty"` + + // NumSecondsWarningPeriod (Optional - default 30 days) is used to define certificate expiry warning period. + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + NumSecondsWarningPeriod int64 `json:"numSecondsWarningPeriod,omitempty"` + + // FabricVersion (Optional) set the fabric version you want to use. 
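+ // Note that the json key below is simply "version" (no omitempty); the operator reads and writes it through GetFabricVersion/SetFabricVersion in ibpca.go.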
+ // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + FabricVersion string `json:"version"` + + // Domain is the sub-domain used for CA's deployment + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Domain string `json:"domain,omitempty"` + + // Ingress (Optional) is ingress object for ingress overrides + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Ingress Ingress `json:"ingress,omitempty"` + + /* cluster related configs */ + + // Arch (Optional) is the architecture of the nodes where CA should be deployed + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Arch []string `json:"arch,omitempty"` + + // Region (Optional) is the region of the nodes where the CA should be deployed + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Region string `json:"region,omitempty"` + + // Zone (Optional) is the zone of the nodes where the CA should be deployed + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Zone string `json:"zone,omitempty"` + + // Action (Optional) is action object for trigerring actions + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Action CAAction `json:"action,omitempty"` +} + +// +k8s:deepcopy-gen=true +// ConfigOverride is the overrides to CA's & TLSCA's configuration +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type ConfigOverride struct { + // CA (Optional) is the overrides to CA's configuration + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + // +kubebuilder:validation:Type=object + // +kubebuilder:validation:Schemaless + // +kubebuilder:pruning:PreserveUnknownFields + CA *runtime.RawExtension `json:"ca,omitempty"` + // TLSCA (Optional) is the overrides to TLSCA's configuration + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + // +kubebuilder:validation:Type=object + // +kubebuilder:validation:Schemaless + // +kubebuilder:pruning:PreserveUnknownFields + TLSCA *runtime.RawExtension `json:"tlsca,omitempty"` + // MaxNameLength (Optional) is the maximum length of the name that the CA can have + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + MaxNameLength *int `json:"maxnamelength,omitempty"` +} + +// +k8s:deepcopy-gen=true +// IBPCAStatus defines the observed state of IBPCA +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type IBPCAStatus struct { + // CRStatus is the status of the CA resource + // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true + CRStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:deepcopy-gen=true +// +kubebuilder:storageversion +// Certificate Authorities issue certificates for all the identities to transact on the network. +// Warning: CA deployment using this tile is not supported. Please use the IBP Console to deploy a CA. 
+// +kubebuilder:subresource:status +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +// +operator-sdk:gen-csv:customresourcedefinitions.displayName="IBP CA" +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`Deployments,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`Ingresses,v1beta1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`PersistentVolumeClaim,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`Role,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`RoleBinding,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`Route,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`Services,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`ServiceAccounts,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`ConfigMaps,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`Secrets,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`Pods,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`Replicasets,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`IBPCA,v1beta1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`IBPPeer,v1beta1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`IBPOrderer,v1beta1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`IBPConsole,v1beta1,""` +type IBPCA struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Spec IBPCASpec `json:"spec,omitempty"` + + // Status is the observed state of IBPCA + // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true + Status IBPCAStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:deepcopy-gen=true +// IBPCAList contains a list of IBPCA +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type IBPCAList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []IBPCA `json:"items"` +} + +// +k8s:deepcopy-gen=true +// CAResources is the overrides to the resources of the CA +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type CAResources struct { + // Init is the resources provided to the init container + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Init *corev1.ResourceRequirements `json:"init,omitempty"` + + // CA is the resources provided to the CA container + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + CA *corev1.ResourceRequirements `json:"ca,omitempty"` + + // EnrollJJob is the resources provided to the enroll job container + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + EnrollJob *corev1.ResourceRequirements `json:"enrollJob,omitempty"` + + // HSMDaemon is the resources provided to the HSM daemon container + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + HSMDaemon *corev1.ResourceRequirements `json:"hsmDaemon,omitempty"` +} + +// +k8s:deepcopy-gen=true +// CAStorages is the overrides to the storage of the CA +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type CAStorages struct { + // CA is the configuration of the storage of the CA + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + CA 
*StorageSpec `json:"ca,omitempty"` +} + +// +k8s:deepcopy-gen=true +// CAConnectionProfile is the object for connection profile +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type CAConnectionProfile struct { + // Endpoints is the endpoints to talk to CA + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Endpoints CAEndpoints `json:"endpoints"` + + // TLS is the object with CA servers TLS information + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + TLS *ConnectionProfileTLS `json:"tls"` + + // CA is the object with CA crypto in connection profile + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + CA *MSP `json:"ca"` + + // TLSCA is the object with tls CA crypto in connection profile + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + TLSCA *MSP `json:"tlsca"` +} + +// ConnectionProfileTLS is the object with CA servers TLS information +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type ConnectionProfileTLS struct { + + // Cert is the base64 encoded tls cert of CA server + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Cert string `json:"cert"` +} + +// CAEndpoints is the list of endpoints to communicate with the CA +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type CAEndpoints struct { + // API is the endpoint to communicate with CA's API + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + API string `json:"api"` + // Operations is the endpoint to communicate with CA's Operations API + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Operations string `json:"operations"` +} + +// CAImages is the list of images to be used in CA deployment +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type CAImages struct { + // CAImage is the name of the CA image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + CAImage string `json:"caImage,omitempty"` + // CATag is the tag of the CA image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + CATag string `json:"caTag,omitempty"` + // CAInitImage is the name of the Init image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + CAInitImage string `json:"caInitImage,omitempty"` + // CAInitTag is the tag of the Init image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + CAInitTag string `json:"caInitTag,omitempty"` + // HSMImage is the name of the HSM image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + HSMImage string `json:"hsmImage,omitempty"` + // HSMTag is the tag of the HSM image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + HSMTag string `json:"hsmTag,omitempty"` + // EnrollerImage is the name of the init image for crypto generation + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + EnrollerImage string `json:"enrollerImage,omitempty"` + // EnrollerTag is the tag of the init image for crypto generation + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + EnrollerTag string `json:"enrollerTag,omitempty"` +} + +// +k8s:deepcopy-gen=true +// CACustomNames is the list of preconfigured objects to be used for CA's deployment +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type CACustomNames struct { + // PVC is the list of PVC Names to be used for 
CA's deployment + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + PVC CAPVCNames `json:"pvc,omitempty"` + // Sqlite is the sqlite path to be used for CA's deployment + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Sqlite string `json:"sqlitepath,omitempty"` +} + +// CAPVCNames is the list of PVC Names to be used for CA's deployment +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type CAPVCNames struct { + // CA is the pvc to be used as CA's storage + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + CA string `json:"ca,omitempty"` +} + +// CAAction contains actions that can be performed on CA +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type CAAction struct { + // Restart action is used to restart the running CA + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Restart bool `json:"restart,omitempty"` + + // Renew action is object for certificate renewals + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Renew Renew `json:"renew,omitempty"` +} + +// Renew is object for certificate renewals +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type Renew struct { + // TLSCert action is used to renew TLS crypto for CA server + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + TLSCert bool `json:"tlscert,omitempty"` +} diff --git a/api/v1beta1/ibpconsole.go b/api/v1beta1/ibpconsole.go new file mode 100644 index 00000000..52175b28 --- /dev/null +++ b/api/v1beta1/ibpconsole.go @@ -0,0 +1,130 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package v1beta1 + +import ( + "encoding/json" + "strings" +) + +func (s *IBPConsole) ResetRestart() { + s.Spec.Action.Restart = false +} + +// GetMSPID returns empty string as no orgs are +// associated with console (implemented for the +// restart manager logic) +func (s *IBPConsole) GetMSPID() string { + // no-op + return "" +} + +func (s *IBPConsole) UseTags() bool { + useTags := false + if s.Spec.UseTags != nil && *(s.Spec.UseTags) { + useTags = *s.Spec.UseTags + } + return useTags +} + +func (s *IBPConsoleSpec) GetOverridesConsole() (*ConsoleOverridesConsole, error) { + override := &ConsoleOverridesConsole{} + if s.ConfigOverride != nil && s.ConfigOverride.Console != nil { + err := json.Unmarshal(s.ConfigOverride.Console.Raw, override) + if err != nil { + return nil, err + } + } + return override, nil +} + +func (s *IBPConsoleSpec) GetOverridesDeployer() (*ConsoleOverridesDeployer, error) { + override := &ConsoleOverridesDeployer{} + if s.ConfigOverride != nil && s.ConfigOverride.Deployer != nil { + err := json.Unmarshal(s.ConfigOverride.Deployer.Raw, override) + if err != nil { + return nil, err + } + } + return override, nil +} + +func (s *IBPConsoleSpec) UsingRemoteDB() bool { + if strings.Contains(s.ConnectionString, "localhost") || s.ConnectionString == "" { + return false + } + + return true +} + +func (v *Versions) Override(requestedVersions *Versions, registryURL string, arch string) { + if requestedVersions == nil { + return + } + + if len(requestedVersions.CA) != 0 { + CAVersions := map[string]VersionCA{} + for key, _ := range requestedVersions.CA { + var caConfig VersionCA + requestedCAVersion := requestedVersions.CA[key] + caConfig.Image.Override(&requestedCAVersion.Image, registryURL, arch) + caConfig.Default = requestedCAVersion.Default + caConfig.Version = requestedCAVersion.Version + CAVersions[key] = caConfig + } + v.CA = CAVersions + } + + if len(requestedVersions.Peer) != 0 { + PeerVersions := map[string]VersionPeer{} + for key, _ := range requestedVersions.Peer { + var peerConfig VersionPeer + requestedPeerVersion := requestedVersions.Peer[key] + peerConfig.Image.Override(&requestedPeerVersion.Image, registryURL, arch) + peerConfig.Default = requestedPeerVersion.Default + peerConfig.Version = requestedPeerVersion.Version + PeerVersions[key] = peerConfig + } + v.Peer = PeerVersions + } + + if len(requestedVersions.Orderer) != 0 { + OrdererVersions := map[string]VersionOrderer{} + for key, _ := range requestedVersions.Orderer { + var ordererConfig VersionOrderer + requestedOrdererVersion := requestedVersions.Orderer[key] + ordererConfig.Image.Override(&requestedOrdererVersion.Image, registryURL, arch) + ordererConfig.Default = requestedOrdererVersion.Default + ordererConfig.Version = requestedOrdererVersion.Version + OrdererVersions[key] = ordererConfig + } + v.Orderer = OrdererVersions + } +} + +func init() { + SchemeBuilder.Register(&IBPConsole{}, &IBPConsoleList{}) +} + +func (c *IBPConsoleStatus) HasType() bool { + if c.CRStatus.Type != "" { + return true + } + return false +} diff --git a/api/v1beta1/ibpconsole_types.go b/api/v1beta1/ibpconsole_types.go new file mode 100644 index 00000000..d6d3b9d0 --- /dev/null +++ b/api/v1beta1/ibpconsole_types.go @@ -0,0 +1,405 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1beta1 + +import ( + consolev1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/console/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=true +// IBPConsoleSpec defines the desired state of IBPConsole +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type IBPConsoleSpec struct { + // License should be accepted by the user to be able to setup console + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + License License `json:"license"` + + // Images (Optional) lists the images to be used for console's deployment + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Images *ConsoleImages `json:"images,omitempty"` + + // ImagePullSecrets (Optional) is the list of ImagePullSecrets to be used for console's deployment + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + ImagePullSecrets []string `json:"imagePullSecrets,omitempty"` + + // Replicas (Optional - default 1) is the number of console replicas to be setup + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Replicas *int32 `json:"replicas,omitempty"` + + // Resources (Optional) is the amount of resources to be provided to console deployment + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Resources *ConsoleResources `json:"resources,omitempty"` + + // Service (Optional) is the override object for console's service + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Service *Service `json:"service,omitempty"` + + // ServiceAccountName defines serviceaccount used for console deployment + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + ServiceAccountName string `json:"serviceAccountName,omitempty"` + + // Storage (Optional - uses default storageclass if not provided) is the override object for CA's PVC config + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Storage *ConsoleStorage `json:"storage,omitempty"` + + // NetworkInfo is object for network overrides + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + NetworkInfo *NetworkInfo `json:"networkinfo,omitempty"` + + // Ingress (Optional) is ingress object for ingress overrides + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Ingress Ingress `json:"ingress,omitempty"` + + /* console settings */ + // AuthScheme is auth scheme for console access + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + AuthScheme string `json:"authScheme,omitempty"` + + // AllowDefaultPassword, if true, will bypass the password reset flow + // on the first connection to the console GUI. By default (false), all + // consoles require a password reset at the first login. 
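+ // This bypass is best reserved for development and test networks; leaving it unset keeps the password-reset flow enabled.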
+ // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + AllowDefaultPassword bool `json:"allowDefaultPassword,omitempty"` + + // Components is database name used for components + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Components string `json:"components,omitempty"` + + // ClusterData is object cluster data information + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + ClusterData *consolev1.IBPConsoleClusterData `json:"clusterdata,omitempty"` + + // ConfigtxlatorURL is url for configtxlator server + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + ConfigtxlatorURL string `json:"configtxlator,omitempty"` + + // ConnectionString is connection url for backend database + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + ConnectionString string `json:"connectionString,omitempty"` + + // DeployerTimeout is timeout value for deployer calls + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + DeployerTimeout int32 `json:"deployerTimeout,omitempty"` + + // DeployerURL is url for deployer server + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + DeployerURL string `json:"deployerUrl,omitempty"` + + // Email is the email used for initial access + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Email string `json:"email,omitempty"` + + // FeatureFlags is object for feature flag settings + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + FeatureFlags *consolev1.FeatureFlags `json:"featureflags,omitempty"` + + IAMApiKey string `json:"iamApiKey,omitempty"` + SegmentWriteKey string `json:"segmentWriteKey,omitempty"` + IBMID *consolev1.IBMID `json:"ibmid,omitempty"` + Proxying *bool `json:"proxying,omitempty"` + + // Password is initial password to access console + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Password string `json:"password,omitempty"` + + // PasswordSecretName is secretname where password is stored + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + PasswordSecretName string `json:"passwordSecretName,omitempty"` + + // Sessions is sessions database name to use + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Sessions string `json:"sessions,omitempty"` + + // System is system database name to use + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + System string `json:"system,omitempty"` + + // SystemChannel is default systemchannel name + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + SystemChannel string `json:"systemChannel,omitempty"` + + // TLSSecretName is secret name to load custom tls certs + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + TLSSecretName string `json:"tlsSecretName,omitempty"` + + CRN *CRN `json:"crn,omitempty"` + Kubeconfig *[]byte `json:"kubeconfig,omitempty"` + KubeconfigSecretName string `json:"kubeconfigsecretname,omitempty"` + Versions *Versions `json:"versions,omitempty"` + KubeconfigNamespace string `json:"kubeconfignamespace,omitempty"` + + // RegistryURL is registry url used to pull images + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + RegistryURL string `json:"registryURL,omitempty"` + + // Deployer is object for deployer configs + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Deployer *Deployer `json:"deployer,omitempty"` 
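+ // Note: the undocumented fields above (IAMApiKey, SegmentWriteKey, IBMID, Proxying, CRN, Kubeconfig*, Versions) are all optional (omitempty) and appear to carry IBM Cloud / SaaS-oriented settings.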
+ + // Arch (Optional) is the architecture of the nodes where console should be deployed + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Arch []string `json:"arch,omitempty"` + + // Region (Optional) is the region of the nodes where the console should be deployed + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Region string `json:"region,omitempty"` + + // Zone (Optional) is the zone of the nodes where the console should be deployed + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Zone string `json:"zone,omitempty"` + + // ConfigOverride (Optional) is the object to provide overrides + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + ConfigOverride *ConsoleOverrides `json:"configoverride,omitempty"` + + // Action (Optional) is action object for trigerring actions + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Action ConsoleAction `json:"action,omitempty"` + + // Version (Optional) is version for the console + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Version string `json:"version"` + + // UseTags (Optional) is a flag to switch between image digests and tags + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + UseTags *bool `json:"usetags"` +} + +// +k8s:deepcopy-gen=true +// ConsoleOverrides is the overrides to console configuration +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type ConsoleOverrides struct { + // Console is the overrides to console configuration + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + // +kubebuilder:validation:Type=object + // +kubebuilder:validation:Schemaless + // +kubebuilder:pruning:PreserveUnknownFields + Console *runtime.RawExtension `json:"console,omitempty"` + + // Deployer is the overrides to deployer configuration + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + // +kubebuilder:validation:Type=object + // +kubebuilder:validation:Schemaless + // +kubebuilder:pruning:PreserveUnknownFields + Deployer *runtime.RawExtension `json:"deployer,omitempty"` + + // MaxNameLength (Optional) is the maximum length of the name that the console can have + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + MaxNameLength *int `json:"maxnamelength,omitempty"` +} + +// +k8s:deepcopy-gen=true +type ConsoleOverridesConsole struct { + HostURL string `json:"hostURL,omitempty"` + ActivityTrackerConsolePath string `json:"activityTrackerConsolePath,omitempty"` + ActivityTrackerHostPath string `json:"activityTrackerHostPath,omitempty"` + HSM string `json:"hsm"` +} + +// +k8s:deepcopy-gen=true +type ConsoleOverridesDeployer struct { + Timeouts *DeployerTimeouts `json:"timeouts,omitempty"` +} + +// +k8s:deepcopy-gen=true +type Versions struct { + CA map[string]VersionCA `json:"ca"` + Peer map[string]VersionPeer `json:"peer"` + Orderer map[string]VersionOrderer `json:"orderer"` +} + +type VersionCA struct { + Default bool `json:"default"` + Version string `json:"version"` + Image CAImages `json:"image,omitempty"` +} + +type VersionOrderer struct { + Default bool `json:"default"` + Version string `json:"version"` + Image OrdererImages `json:"image,omitempty"` +} +type VersionPeer struct { + Default bool `json:"default"` + Version string `json:"version"` + Image PeerImages `json:"image,omitempty"` +} + +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=true +// IBPConsoleStatus defines the observed 
state of IBP Console +// +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true +type IBPConsoleStatus struct { + CRStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:openapi-gen=true +// +kubebuilder:storageversion +// +kubebuilder:subresource:status +// +k8s:deepcopy-gen=true +// The Console is used to deploy and manage the CA, peer, ordering nodes. +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +// +operator-sdk:gen-csv:customresourcedefinitions.displayName="IBP Console" +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`Deployments,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`Ingresses,v1beta1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`PersistentVolumeClaim,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`Role,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`RoleBinding,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`Route,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`Services,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`ServiceAccounts,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`ConfigMaps,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`Secrets,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`Pods,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`Replicasets,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`IBPCA,v1beta1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`IBPPeer,v1beta1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`IBPOrderer,v1beta1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`IBPConsole,v1beta1,""` +type IBPConsole struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Spec IBPConsoleSpec `json:"spec,omitempty"` + + // Status is the observed state of IBPConsole + // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true + Status IBPConsoleStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:deepcopy-gen=true +// IBPConsoleList contains a list of IBP Console +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type IBPConsoleList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []IBPConsole `json:"items"` +} + +// +k8s:deepcopy-gen=true +// ConsoleResources is the overrides to the resources of the Console +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type ConsoleResources struct { + // Init is the resources provided to the init container + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Init *corev1.ResourceRequirements `json:"init,omitempty"` + + // CouchDB is the resources provided to the couchdb container + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + CouchDB *corev1.ResourceRequirements `json:"couchdb,omitempty"` + + // Console is the resources provided to the console container + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Console *corev1.ResourceRequirements `json:"console,omitempty"` + + // Deployer is the resources provided to the deployer container + 
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Deployer *corev1.ResourceRequirements `json:"deployer,omitempty"` + + // Configtxlator is the resources provided to the configtxlator container + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Configtxlator *corev1.ResourceRequirements `json:"configtxlator,omitempty"` +} + +// +k8s:deepcopy-gen=true +// ConsoleStorage is the overrides to the storage of the console +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type ConsoleStorage struct { + // Console is the configuration of the storage of the console + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Console *StorageSpec `json:"console,omitempty"` +} + +// +k8s:deepcopy-gen=true +// ConsoleImages is the list of images to be used in console deployment +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type ConsoleImages struct { + // ConsoleInitImage is the name of the console init image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + ConsoleInitImage string `json:"consoleInitImage,omitempty"` + + // ConsoleInitTag is the tag of the console init image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + ConsoleInitTag string `json:"consoleInitTag,omitempty"` + + // ConsoleImage is the name of the console image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + ConsoleImage string `json:"consoleImage,omitempty"` + + // ConsoleTag is the tag of the console image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + ConsoleTag string `json:"consoleTag,omitempty"` + + // ConfigtxlatorImage is the name of the configtxlator image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + ConfigtxlatorImage string `json:"configtxlatorImage,omitempty"` + + // ConfigtxlatorTag is the tag of the configtxlator image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + ConfigtxlatorTag string `json:"configtxlatorTag,omitempty"` + + // DeployerImage is the name of the deployer image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + DeployerImage string `json:"deployerImage,omitempty"` + + // DeployerTag is the tag of the deployer image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + DeployerTag string `json:"deployerTag,omitempty"` + + // CouchDBImage is the name of the couchdb image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + CouchDBImage string `json:"couchdbImage,omitempty"` + + // CouchDBTag is the tag of the couchdb image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + CouchDBTag string `json:"couchdbTag,omitempty"` + + // MustgatherImage is the name of the mustgather image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + MustgatherImage string `json:"mustgatherImage,omitempty"` + + // MustgatherTag is the tag of the mustgather image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + MustgatherTag string `json:"mustgatherTag,omitempty"` +} + +type Deployer struct { + Domain string `json:"domain,omitempty"` + ConnectionString string `json:"connectionstring,omitempty"` + ComponentsDB string `json:"components_db,omitempty"` + CreateDB bool `json:"create_db,omitempty"` +} + +type DeployerTimeouts struct { + Deployment int `json:"componentDeploy"` + APIServer int
`json:"apiServer"` +} + +// ConsoleAction contains actions that can be performed on console +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type ConsoleAction struct { + Restart bool `json:"restart,omitempty"` +} diff --git a/api/v1beta1/ibporderer.go b/api/v1beta1/ibporderer.go new file mode 100644 index 00000000..7324ec97 --- /dev/null +++ b/api/v1beta1/ibporderer.go @@ -0,0 +1,300 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1beta1 + +import ( + config "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer/config/v1" + v2config "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer/config/v2" + v24config "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer/config/v24" + "github.com/IBM-Blockchain/fabric-operator/pkg/util/image" + "github.com/IBM-Blockchain/fabric-operator/version" + corev1 "k8s.io/api/core/v1" +) + +// +kubebuilder:object:generate=false +type OrdererConfig interface { + UsingPKCS11() bool +} + +func (s *IBPOrderer) ResetRestart() { + s.Spec.Action.Restart = false +} + +func (s *IBPOrderer) ResetEcertReenroll() { + s.Spec.Action.Reenroll.Ecert = false + s.Spec.Action.Reenroll.EcertNewKey = false +} + +func (s *IBPOrderer) ResetTLSReenroll() { + s.Spec.Action.Reenroll.TLSCert = false + s.Spec.Action.Reenroll.TLSCertNewKey = false +} + +func (s *IBPOrderer) ResetEcertEnroll() { + s.Spec.Action.Enroll.Ecert = false +} + +func (s *IBPOrderer) ResetTLSEnroll() { + s.Spec.Action.Enroll.TLSCert = false +} + +func (o *IBPOrderer) IsHSMEnabled() bool { + ordererConfig, err := o.GetConfigOverride() + if err != nil { + return false + } + + return ordererConfig.(OrdererConfig).UsingPKCS11() +} + +func (o *IBPOrderer) ClientAuthCryptoSet() bool { + secret := o.Spec.Secret + if secret != nil { + if secret.MSP != nil && secret.MSP.ClientAuth != nil { + return true + } + if secret.Enrollment != nil && secret.Enrollment.ClientAuth != nil { + return true + } + } + + return false +} + +func (o *IBPOrderer) UsingHSMProxy() bool { + if o.Spec.HSM != nil && o.Spec.HSM.PKCS11Endpoint != "" { + return true + } + return false +} + +func (o *IBPOrderer) GetConfigOverride() (interface{}, error) { + switch version.GetMajorReleaseVersion(o.Spec.FabricVersion) { + case version.V2: + currentVer := version.String(o.Spec.FabricVersion) + if currentVer.EqualWithoutTag(version.V2_4_1) || currentVer.GreaterThan(version.V2_4_1) { + if o.Spec.ConfigOverride == nil { + return &v24config.Orderer{}, nil + } + + configOverride, err := v24config.ReadFrom(&o.Spec.ConfigOverride.Raw) + if err != nil { + return nil, err + } + return configOverride, nil + } else { + if o.Spec.ConfigOverride == nil { + return &v2config.Orderer{}, nil + } + + configOverride, err := v2config.ReadFrom(&o.Spec.ConfigOverride.Raw) + if err != nil { + return nil, err + } + return configOverride, nil + } + + case version.V1: + 
fallthrough + default: + if o.Spec.ConfigOverride == nil { + return &config.Orderer{}, nil + } + + configOverride, err := config.ReadFrom(&o.Spec.ConfigOverride.Raw) + if err != nil { + return nil, err + } + return configOverride, nil + } +} + +func (o *IBPOrderer) UsingHSMImage() bool { + if o.Spec.Images != nil && o.Spec.Images.HSMImage != "" { + return true + } + return false +} + +func (o *IBPOrderer) EnrollerImage() string { + return image.Format(o.Spec.Images.EnrollerImage, o.Spec.Images.EnrollerTag) +} + +func (s *IBPOrderer) GetPullSecrets() []corev1.LocalObjectReference { + pullSecrets := []corev1.LocalObjectReference{} + for _, ps := range s.Spec.ImagePullSecrets { + pullSecrets = append(pullSecrets, corev1.LocalObjectReference{Name: ps}) + } + return pullSecrets +} + +func (s *IBPOrderer) GetRegistryURL() string { + return s.Spec.RegistryURL +} + +func (s *IBPOrderer) GetArch() []string { + return s.Spec.Arch +} + +// GetFabricVersion returns fabric version from CR spec +func (s *IBPOrderer) GetFabricVersion() string { + return s.Spec.FabricVersion +} + +// SetFabricVersion sets fabric version on spec +func (s *IBPOrderer) SetFabricVersion(version string) { + s.Spec.FabricVersion = version +} + +// ImagesSet returns true if the spec has images defined +func (s *IBPOrderer) ImagesSet() bool { + return s.Spec.Images != nil +} + +// GetResource returns resources defined in spec for request component, if no resources +// defined returns blank but initialized instance of resources +func (s *IBPOrderer) GetResource(comp Component) corev1.ResourceRequirements { + if s.Spec.Resources != nil { + switch comp { + case INIT: + if s.Spec.Resources.Init != nil { + return *s.Spec.Resources.Init + } + case ORDERER: + if s.Spec.Resources.Orderer != nil { + return *s.Spec.Resources.Orderer + } + case GRPCPROXY: + if s.Spec.Resources.GRPCProxy != nil { + return *s.Spec.Resources.GRPCProxy + } + case ENROLLER: + if s.Spec.Resources.Enroller != nil { + return *s.Spec.Resources.Enroller + } + case HSMDAEMON: + if s.Spec.Resources.HSMDaemon != nil { + return *s.Spec.Resources.HSMDaemon + } + } + } + + return corev1.ResourceRequirements{} +} + +// PVCName returns pvc name associated with instance +func (s *IBPOrderer) PVCName() string { + name := s.Name + "-pvc" + if s.Spec.CustomNames.PVC.Orderer != "" { + name = s.Spec.CustomNames.PVC.Orderer + } + return name +} + +func (s *IBPOrderer) GetMSPID() string { + return s.Spec.MSPID +} + +func (s *IBPOrdererSpec) NodeOUDisabled() bool { + if s.DisableNodeOU != nil { + return *s.DisableNodeOU + } + + return false +} + +func (s *IBPOrdererSpec) HSMSet() bool { + if s.HSM != nil && s.HSM.PKCS11Endpoint != "" { + return true + } + + return false +} + +func (s *IBPOrdererSpec) DomainSet() bool { + if s.Domain != "" { + return true + } + + return false +} + +func (s *IBPOrdererSpec) IsPrecreateOrderer() bool { + if s.IsPrecreate == nil { + return false + } + + if *s.IsPrecreate == BoolTrue { + return true + } + + return false +} + +func (s *IBPOrdererSpec) IsUsingChannelLess() bool { + if s.UseChannelLess == nil { + return false + } + + if *s.UseChannelLess == BoolTrue { + return true + } + + return false +} + +func (s *IBPOrdererSpec) GetNumSecondsWarningPeriod() int64 { + daysToSecondsConversion := int64(24 * 60 * 60) + if s.NumSecondsWarningPeriod == 0 { + // Default to the equivalent of 30 days + return 30 * daysToSecondsConversion + } + return s.NumSecondsWarningPeriod +} + +func (i *OrdererImages) Override(requested *OrdererImages, registryURL string, 
arch string) { + if requested == nil { + requested = &OrdererImages{} + } + + // Images + i.GRPCWebImage = image.GetImage(registryURL, i.GRPCWebImage, requested.GRPCWebImage) + i.OrdererInitImage = image.GetImage(registryURL, i.OrdererInitImage, requested.OrdererInitImage) + i.OrdererImage = image.GetImage(registryURL, i.OrdererImage, requested.OrdererImage) + i.HSMImage = image.GetImage(registryURL, i.HSMImage, requested.HSMImage) + i.EnrollerImage = image.GetImage(registryURL, i.EnrollerImage, requested.EnrollerImage) + + // Tags + i.GRPCWebTag = image.GetTag(arch, i.GRPCWebTag, requested.GRPCWebTag) + i.OrdererInitTag = image.GetTag(arch, i.OrdererInitTag, requested.OrdererInitTag) + i.OrdererTag = image.GetTag(arch, i.OrdererTag, requested.OrdererTag) + i.HSMTag = image.GetTag(arch, i.HSMTag, requested.HSMTag) + i.EnrollerTag = image.GetTag(arch, i.EnrollerTag, requested.EnrollerTag) +} + +func init() { + SchemeBuilder.Register(&IBPOrderer{}, &IBPOrdererList{}) +} + +func (o *IBPOrdererStatus) HasType() bool { + if o.CRStatus.Type != "" { + return true + } + return false +} diff --git a/api/v1beta1/ibporderer_types.go b/api/v1beta1/ibporderer_types.go new file mode 100644 index 00000000..b7f91549 --- /dev/null +++ b/api/v1beta1/ibporderer_types.go @@ -0,0 +1,415 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
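+
+// exampleOrdererDefaults is a minimal sketch of how the helpers defined in
+// ibporderer.go resolve defaults when no config override is set. The function
+// name, the sample Fabric version, and the expected values noted in the
+// comments are illustrative assumptions, not behaviour asserted by tests here.
+func exampleOrdererDefaults() (interface{}, int64) {
+	orderer := &IBPOrderer{Spec: IBPOrdererSpec{FabricVersion: "2.4.1"}}
+
+	// With Spec.ConfigOverride unset, GetConfigOverride is expected to return an
+	// empty, version-matched config struct (a *v24config.Orderer for a 2.4.x version).
+	override, _ := orderer.GetConfigOverride()
+
+	// With NumSecondsWarningPeriod unset, the getter falls back to 30 days
+	// expressed in seconds (30 * 24 * 60 * 60 = 2592000).
+	warnSeconds := orderer.Spec.GetNumSecondsWarningPeriod()
+
+	return override, warnSeconds
+}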
+ + // +k8s:openapi-gen=true +// +k8s:deepcopy-gen=true +// IBPOrdererSpec defines the desired state of IBPOrderer +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type IBPOrdererSpec struct { + // License should be accepted by the user to be able to setup orderer + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + License License `json:"license"` + + // Images (Optional) lists the images to be used for orderer's deployment + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Images *OrdererImages `json:"images,omitempty"` + + // RegistryURL is registry url used to pull images + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + RegistryURL string `json:"registryURL,omitempty"` + + // ImagePullSecrets (Optional) is the list of ImagePullSecrets to be used for orderer's deployment + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + ImagePullSecrets []string `json:"imagePullSecrets,omitempty"` + + // Replicas (Optional - default 1) is the number of orderer replicas to be setup + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Replicas *int32 `json:"replicas,omitempty"` + + // Resources (Optional) is the amount of resources to be provided to orderer deployment + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Resources *OrdererResources `json:"resources,omitempty"` + + // Service (Optional) is the override object for orderer's service + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Service *Service `json:"service,omitempty"` + + // Storage (Optional - uses default storageclass if not provided) is the override object for orderer's PVC config + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Storage *OrdererStorages `json:"storage,omitempty"` + + // GenesisBlock (Optional) is genesis block to start the orderer + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + GenesisBlock string `json:"genesisBlock,omitempty"` + GenesisProfile string `json:"genesisProfile,omitempty"` + UseChannelLess *bool `json:"useChannelLess,omitempty"` + + // MSPID is the msp id of the orderer + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + MSPID string `json:"mspID,omitempty"` + + // OrdererType is type of orderer you want to start + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + OrdererType string `json:"ordererType,omitempty"` + + // OrgName is the organization name of the orderer + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + OrgName string `json:"orgName,omitempty"` + + // SystemChannelName is the name of systemchannel + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + SystemChannelName string `json:"systemChannelName,omitempty"` + + // Secret is object for msp crypto + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Secret *SecretSpec `json:"secret,omitempty"` + + // ConfigOverride (Optional) is the object to provide overrides to orderer yaml config + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + // +kubebuilder:validation:Type=object + // +kubebuilder:validation:Schemaless + // +kubebuilder:pruning:PreserveUnknownFields + ConfigOverride *runtime.RawExtension `json:"configoverride,omitempty"` + + // HSM (Optional) is DEPRECATED + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + HSM *HSM
`json:"hsm,omitempty"` + + // IsPrecreate (Optional) defines if orderer is in precreate state + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + IsPrecreate *bool `json:"isprecreate,omitempty"` + + // FabricVersion (Optional) is fabric version for the orderer + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + FabricVersion string `json:"version"` + + // NumSecondsWarningPeriod (Optional - default 30 days) is used to define certificate expiry warning period. + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + NumSecondsWarningPeriod int64 `json:"numSecondsWarningPeriod,omitempty"` + + // ClusterSize (Optional) number of orderers if a cluster + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + ClusterSize int `json:"clusterSize,omitempty"` + + // ClusterLocation (Optional) is array of cluster location settings for cluster + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + ClusterLocation []IBPOrdererClusterLocation `json:"location,omitempty"` + + // ClusterConfigOverride (Optional) is array of config overrides for cluster + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + // +kubebuilder:pruning:PreserveUnknownFields + ClusterConfigOverride []*runtime.RawExtension `json:"clusterconfigoverride,omitempty"` + + // ClusterSecret (Optional) is array of msp crypto for cluster + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + ClusterSecret []*SecretSpec `json:"clustersecret,omitempty"` + + // NodeNumber (Optional) is the number of this node in cluster - used internally + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + NodeNumber *int `json:"number,omitempty"` + + // Ingress (Optional) is ingress object for ingress overrides + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Ingress Ingress `json:"ingress,omitempty"` + + // Domain is the sub-domain used for orderer's deployment + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Domain string `json:"domain,omitempty"` + + // Arch (Optional) is the architecture of the nodes where orderer should be deployed + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Arch []string `json:"arch,omitempty"` + + // Zone (Optional) is the zone of the nodes where the orderer should be deployed + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Zone string `json:"zone,omitempty"` + + // Region (Optional) is the region of the nodes where the orderer should be deployed + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Region string `json:"region,omitempty"` + + // DisableNodeOU (Optional) is used to switch nodeou on and off + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + DisableNodeOU *bool `json:"disablenodeou,omitempty"` + + // CustomNames (Optional) is to use pre-configured resources for orderer's deployment + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + CustomNames OrdererCustomNames `json:"customNames,omitempty"` + + // Action (Optional) is object for orderer actions + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Action OrdererAction `json:"action,omitempty"` + + // ExternalAddress (Optional) is used internally + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + ExternalAddress string `json:"externalAddress,omitempty"` +} + +// 
IBPOrdererClusterLocation (Optional) is object of cluster location settings for cluster +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type IBPOrdererClusterLocation struct { + // Zone (Optional) is the zone of the nodes where the orderer should be deployed + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Zone string `json:"zone,omitempty"` + + // Region (Optional) is the region of the nodes where the orderer should be deployed + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Region string `json:"region,omitempty"` +} + +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=true +// IBPOrdererStatus defines the observed state of IBPOrderer +// +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true +type IBPOrdererStatus struct { + CRStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:openapi-gen=true +// +kubebuilder:storageversion +// +kubebuilder:subresource:status +// +k8s:deepcopy-gen=true +// Ordering nodes create the blocks that form the ledger and send them to peers. +// Warning: Orderer deployment using this tile is not supported. Please use the IBP Console to deploy an orderer. +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +// +operator-sdk:gen-csv:customresourcedefinitions.displayName="IBP Orderer" +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`Deployments,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`Ingresses,v1beta1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`PersistentVolumeClaim,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`Role,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`RoleBinding,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`Route,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`Services,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`ServiceAccounts,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`ConfigMaps,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`Secrets,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`Pods,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`Replicasets,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`IBPCA,v1beta1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`IBPPeer,v1beta1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`IBPOrderer,v1beta1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`IBPConsole,v1beta1,""` +type IBPOrderer struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec IBPOrdererSpec `json:"spec,omitempty"` + // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true + Status IBPOrdererStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:deepcopy-gen=true +// IBPOrdererList contains a list of IBPOrderer +type IBPOrdererList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []IBPOrderer `json:"items"` +} + +// +k8s:deepcopy-gen=true +// OrdererResources is the overrides to the resources of the orderer +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type OrdererResources struct { + // Init (Optional) is the resources provided to 
the init container + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Init *corev1.ResourceRequirements `json:"init,omitempty"` + + // Orderer (Optional) is the resources provided to the orderer container + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Orderer *corev1.ResourceRequirements `json:"orderer,omitempty"` + + // GRPCProxy (Optional) is the resources provided to the proxy container + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + GRPCProxy *corev1.ResourceRequirements `json:"proxy,omitempty"` + + // Enroller (Optional) is the resources provided to the enroller container + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Enroller *corev1.ResourceRequirements `json:"enroller,omitempty"` + + // HSMDaemon (Optional) is the resources provided to the HSM Daemon container + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + HSMDaemon *corev1.ResourceRequirements `json:"hsmdaemon,omitempty"` +} + +// OrdererImages is the list of images to be used in orderer deployment +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type OrdererImages struct { + // OrdererInitImage is the name of the orderer init image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + OrdererInitImage string `json:"ordererInitImage,omitempty"` + + // OrdererInitTag is the tag of the orderer init image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + OrdererInitTag string `json:"ordererInitTag,omitempty"` + + // OrdererImage is the name of the orderer image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + OrdererImage string `json:"ordererImage,omitempty"` + + // OrdererTag is the tag of the orderer image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + OrdererTag string `json:"ordererTag,omitempty"` + + // GRPCWebImage is the name of the grpc web proxy image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + GRPCWebImage string `json:"grpcwebImage,omitempty"` + + // GRPCWebTag is the tag of the grpc web proxy image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + GRPCWebTag string `json:"grpcwebTag,omitempty"` + + // HSMImage is the name of the hsm image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + HSMImage string `json:"hsmImage,omitempty"` + + // HSMTag is the tag of the hsm image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + HSMTag string `json:"hsmTag,omitempty"` + + // EnrollerImage is the name of the init image for crypto generation + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + EnrollerImage string `json:"enrollerImage,omitempty"` + + // EnrollerTag is the tag of the init image for crypto generation + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + EnrollerTag string `json:"enrollerTag,omitempty"` +} + +// +k8s:deepcopy-gen=true +// OrdererStorages is the overrides to the storage of the orderer +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type OrdererStorages struct { + // Orderer (Optional) is the configuration of the storage of the orderer + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Orderer *StorageSpec `json:"orderer,omitempty"` +} + +// +k8s:deepcopy-gen=true +// OrdererConnectionProfile provides necessary information to connect to 
the orderer +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type OrdererConnectionProfile struct { + // Endpoints is list of endpoints to communicate with the orderer + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Endpoints OrdererEndpoints `json:"endpoints"` + + // TLS is object with tls crypto material for orderer + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + TLS *MSP `json:"tls"` + + // Component is object with ecert crypto material for orderer + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Component *MSP `json:"component"` +} + +// OrdererEndpoints is the list of endpoints to communicate with the orderer +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type OrdererEndpoints struct { + // API is the endpoint to communicate with orderer's API + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + API string `json:"api"` + + // Operations is the endpoint to communicate with orderer's Operations API + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Operations string `json:"operations"` + + // Grpcweb is the endpoint to communicate with orderer's grpcweb proxy API + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Grpcweb string `json:"grpcweb"` + + // Admin is the endpoint to communicate with orderer's admin service API + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Admin string `json:"admin"` +} + +// +k8s:deepcopy-gen=true +// OrdererCustomNames is the list of preconfigured objects to be used for orderer's deployment +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type OrdererCustomNames struct { + // PVC is the list of PVC Names to be used for orderer's deployment + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + PVC OrdererPVCNames `json:"pvc,omitempty"` +} + +// OrdererPVCNames is the list of PVC Names to be used for orderer's deployment +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type OrdererPVCNames struct { + // Orderer is the pvc to be used as orderer's storage + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Orderer string `json:"orderer,omitempty"` +} + +// Action contains actions that can be performed on orderer +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type OrdererAction struct { + // Restart action is used to restart orderer deployment + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Restart bool `json:"restart,omitempty"` + + // Reenroll contains actions for triggering crypto reenroll + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Reenroll OrdererReenrollAction `json:"reenroll,omitempty"` + + // Enroll contains actions for triggering crypto enroll + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Enroll OrdererEnrollAction `json:"enroll,omitempty"` +} + +// OrdererReenrollAction contains actions for reenrolling crypto +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type OrdererReenrollAction struct { + // Ecert is used to trigger reenroll for ecert + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Ecert bool `json:"ecert,omitempty"` + + // EcertNewKey is used to trigger reenroll for ecert and also generating + // a new private key + // 
+operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + EcertNewKey bool `json:"ecertNewKey,omitempty"` + + // TLSCert is used to trigger reenroll for tlscert + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + TLSCert bool `json:"tlscert,omitempty"` + + // TLSCertNewKey is used to trigger reenroll for tlscert and also generating + // a new private key + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + TLSCertNewKey bool `json:"tlscertNewKey,omitempty"` +} + +// OrdererEnrollAction contains actions for enrolling crypto +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type OrdererEnrollAction struct { + // Ecert is used to trigger enroll for ecert + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Ecert bool `json:"ecert,omitempty"` + + // TLSCert is used to trigger enroll for tls certs + TLSCert bool `json:"tlscert,omitempty"` +} diff --git a/api/v1beta1/ibppeer.go b/api/v1beta1/ibppeer.go new file mode 100644 index 00000000..6fe512de --- /dev/null +++ b/api/v1beta1/ibppeer.go @@ -0,0 +1,326 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package v1beta1 + +import ( + "strings" + + config "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer/config/v1" + v2config "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer/config/v2" + "github.com/IBM-Blockchain/fabric-operator/pkg/util/image" + "github.com/IBM-Blockchain/fabric-operator/version" + corev1 "k8s.io/api/core/v1" +) + +// +kubebuilder:object:generate=false + +type CoreConfig interface { + UsingPKCS11() bool +} + +func (s *IBPPeer) ResetRestart() { + s.Spec.Action.Restart = false +} + +func (s *IBPPeer) ResetEcertReenroll() { + s.Spec.Action.Reenroll.Ecert = false + s.Spec.Action.Reenroll.EcertNewKey = false +} + +func (s *IBPPeer) ResetTLSReenroll() { + s.Spec.Action.Reenroll.TLSCert = false + s.Spec.Action.Reenroll.TLSCertNewKey = false +} + +func (s *IBPPeer) ResetEcertEnroll() { + s.Spec.Action.Enroll.Ecert = false +} + +func (s *IBPPeer) ResetTLSEnroll() { + s.Spec.Action.Enroll.TLSCert = false +} + +func (s *IBPPeer) ResetUpgradeDBs() { + s.Spec.Action.UpgradeDBs = false +} + +func (p *IBPPeer) ClientAuthCryptoSet() bool { + secret := p.Spec.Secret + if secret != nil { + if secret.MSP != nil && secret.MSP.ClientAuth != nil { + return true + } + if secret.Enrollment != nil && secret.Enrollment.ClientAuth != nil { + return true + } + } + + return false +} + +func (p *IBPPeer) UsingHSMProxy() bool { + if p.Spec.HSM != nil && p.Spec.HSM.PKCS11Endpoint != "" { + return true + } + return false +} + +func (p *IBPPeer) UsingHSMImage() bool { + if p.Spec.Images != nil && p.Spec.Images.HSMImage != "" { + return true + } + return false +} + +func (p *IBPPeer) UsingCCLauncherImage() bool { + if p.Spec.Images != nil && p.Spec.Images.CCLauncherImage != "" { + return true + } + + return false +} + +func (p *IBPPeer) EnrollerImage() string { + return image.Format(p.Spec.Images.EnrollerImage, p.Spec.Images.EnrollerTag) +} + +func (s *IBPPeer) GetConfigOverride() (interface{}, error) { + switch version.GetMajorReleaseVersion(s.Spec.FabricVersion) { + case version.V2: + if s.Spec.ConfigOverride == nil { + return &v2config.Core{}, nil + } + + configOverride, err := v2config.ReadFrom(&s.Spec.ConfigOverride.Raw) + if err != nil { + return nil, err + } + return configOverride, nil + case version.V1: + fallthrough + default: + if s.Spec.ConfigOverride == nil { + return &config.Core{}, nil + } + + configOverride, err := config.ReadFrom(&s.Spec.ConfigOverride.Raw) + if err != nil { + return nil, err + } + return configOverride, nil + } +} + +func (s *IBPPeer) IsHSMEnabled() bool { + configOverride, err := s.GetConfigOverride() + if err != nil { + return false + } + + return configOverride.(CoreConfig).UsingPKCS11() +} + +func (s *IBPPeer) UsingCouchDB() bool { + if strings.ToLower(s.Spec.StateDb) == "couchdb" { + return true + } + + return false +} + +func (s *IBPPeer) GetPullSecrets() []corev1.LocalObjectReference { + pullSecrets := []corev1.LocalObjectReference{} + for _, ps := range s.Spec.ImagePullSecrets { + pullSecrets = append(pullSecrets, corev1.LocalObjectReference{Name: ps}) + } + return pullSecrets +} + +func (s *IBPPeer) GetRegistryURL() string { + return s.Spec.RegistryURL +} + +func (s *IBPPeer) GetArch() []string { + return s.Spec.Arch +} + +// GetFabricVersion returns fabric version from CR spec +func (s *IBPPeer) GetFabricVersion() string { + return s.Spec.FabricVersion +} + +// SetFabricVersion sets fabric version on spec +func (s *IBPPeer) SetFabricVersion(version string) { + s.Spec.FabricVersion = version +} + +// ImagesSet returns true if 
the spec has images defined +func (s *IBPPeer) ImagesSet() bool { + return s.Spec.Images != nil +} + +// GetResource returns resources defined in spec for request component, if no resources +// defined returns blank but initialized instance of resources +func (s *IBPPeer) GetResource(comp Component) corev1.ResourceRequirements { + if s.Spec.Resources != nil { + switch comp { + case INIT: + if s.Spec.Resources.Init != nil { + return *s.Spec.Resources.Init + } + case PEER: + if s.Spec.Resources.Peer != nil { + return *s.Spec.Resources.Peer + } + case GRPCPROXY: + if s.Spec.Resources.GRPCProxy != nil { + return *s.Spec.Resources.GRPCProxy + } + case FLUENTD: + if s.Spec.Resources.FluentD != nil { + return *s.Spec.Resources.FluentD + } + case DIND: + if s.Spec.Resources.DinD != nil { + return *s.Spec.Resources.DinD + } + case COUCHDB: + if s.Spec.Resources.CouchDB != nil { + return *s.Spec.Resources.CouchDB + } + case CCLAUNCHER: + if s.Spec.Resources.CCLauncher != nil { + return *s.Spec.Resources.CCLauncher + } + case ENROLLER: + if s.Spec.Resources.Enroller != nil { + return *s.Spec.Resources.Enroller + } + case HSMDAEMON: + if s.Spec.Resources.HSMDaemon != nil { + return *s.Spec.Resources.HSMDaemon + } + } + } + + return corev1.ResourceRequirements{} +} + +// PVCName returns pvc name associated with instance +func (s *IBPPeer) PVCName() string { + name := s.Name + "-pvc" + if s.Spec.CustomNames.PVC.Peer != "" { + name = s.Spec.CustomNames.PVC.Peer + } + return name +} + +func (s *IBPPeer) GetMSPID() string { + return s.Spec.MSPID +} + +func (s *IBPPeerSpec) NodeOUDisabled() bool { + if s.DisableNodeOU != nil { + return *s.DisableNodeOU + } + + return false +} + +func (s *IBPPeerSpec) HSMSet() bool { + if s.HSM != nil && s.HSM.PKCS11Endpoint != "" { + return true + } + + return false +} + +func (s *IBPPeerSpec) DomainSet() bool { + if s.Domain != "" { + return true + } + + return false +} + +func (s *IBPPeerSpec) UsingLevelDB() bool { + if strings.ToLower(s.StateDb) == "leveldb" { + return true + } + + return false +} + +func (s *IBPPeerSpec) GetNumSecondsWarningPeriod() int64 { + daysToSecondsConversion := int64(24 * 60 * 60) + if s.NumSecondsWarningPeriod == 0 { + // Default to the equivalent of 30 days + return 30 * daysToSecondsConversion + } + return s.NumSecondsWarningPeriod +} + +func (p *IBPPeerStatus) HasType() bool { + if p.CRStatus.Type != "" { + return true + } + return false +} + +func (i *PeerImages) Override(requested *PeerImages, registryURL string, arch string) { + if requested == nil { + requested = &PeerImages{} + } + + // Images + i.PeerInitImage = image.GetImage(registryURL, i.PeerInitImage, requested.PeerInitImage) + i.PeerImage = image.GetImage(registryURL, i.PeerImage, requested.PeerImage) + i.CouchDBImage = image.GetImage(registryURL, i.CouchDBImage, requested.CouchDBImage) + i.DindImage = image.GetImage(registryURL, i.DindImage, requested.DindImage) + i.GRPCWebImage = image.GetImage(registryURL, i.GRPCWebImage, requested.GRPCWebImage) + i.FluentdImage = image.GetImage(registryURL, i.FluentdImage, requested.FluentdImage) + i.CCLauncherImage = image.GetImage(registryURL, i.CCLauncherImage, requested.CCLauncherImage) + i.FileTransferImage = image.GetImage(registryURL, i.FileTransferImage, requested.FileTransferImage) + i.BuilderImage = image.GetImage(registryURL, i.BuilderImage, requested.BuilderImage) + i.GoEnvImage = image.GetImage(registryURL, i.GoEnvImage, requested.GoEnvImage) + i.JavaEnvImage = image.GetImage(registryURL, i.JavaEnvImage, requested.JavaEnvImage) + 
i.NodeEnvImage = image.GetImage(registryURL, i.NodeEnvImage, requested.NodeEnvImage) + i.HSMImage = image.GetImage(registryURL, i.HSMImage, requested.HSMImage) + i.EnrollerImage = image.GetImage(registryURL, i.EnrollerImage, requested.EnrollerImage) + + // Tags + i.PeerInitTag = image.GetTag(arch, i.PeerInitTag, requested.PeerInitTag) + i.PeerTag = image.GetTag(arch, i.PeerTag, requested.PeerTag) + i.CouchDBTag = image.GetTag(arch, i.CouchDBTag, requested.CouchDBTag) + i.DindTag = image.GetTag(arch, i.DindTag, requested.DindTag) + i.GRPCWebTag = image.GetTag(arch, i.GRPCWebTag, requested.GRPCWebTag) + i.FluentdTag = image.GetTag(arch, i.FluentdTag, requested.FluentdTag) + i.CCLauncherTag = image.GetTag(arch, i.CCLauncherTag, requested.CCLauncherTag) + i.FileTransferTag = image.GetTag(arch, i.FileTransferTag, requested.FileTransferTag) + i.BuilderTag = image.GetTag(arch, i.BuilderTag, requested.BuilderTag) + i.GoEnvTag = image.GetTag(arch, i.GoEnvTag, requested.GoEnvTag) + i.JavaEnvTag = image.GetTag(arch, i.JavaEnvTag, requested.JavaEnvTag) + i.NodeEnvTag = image.GetTag(arch, i.NodeEnvTag, requested.NodeEnvTag) + i.HSMTag = image.GetTag(arch, i.HSMTag, requested.HSMTag) + i.EnrollerTag = image.GetTag(arch, i.EnrollerTag, requested.EnrollerTag) +} + +func init() { + SchemeBuilder.Register(&IBPPeer{}, &IBPPeerList{}) +} diff --git a/api/v1beta1/ibppeer_types.go b/api/v1beta1/ibppeer_types.go new file mode 100644 index 00000000..5d78508f --- /dev/null +++ b/api/v1beta1/ibppeer_types.go @@ -0,0 +1,490 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
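+
+// examplePeerDefaults is a minimal sketch of how the helpers defined in
+// ibppeer.go interpret the spec fields declared below. The function name, the
+// sample names, and the expected values noted in the comments are illustrative
+// assumptions, not behaviour asserted by tests here.
+func examplePeerDefaults() (bool, string) {
+	peer := &IBPPeer{Spec: IBPPeerSpec{StateDb: "CouchDB"}}
+	peer.Name = "org1-peer1"
+
+	// UsingCouchDB lowercases StateDb before comparing, so "CouchDB" matches.
+	usingCouch := peer.UsingCouchDB()
+
+	// With no Spec.CustomNames.PVC.Peer override, PVCName falls back to
+	// "<name>-pvc", here "org1-peer1-pvc".
+	pvc := peer.PVCName()
+
+	return usingCouch, pvc
+}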
+ +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=true +// IBPPeerSpec defines the desired state of IBPPeer +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type IBPPeerSpec struct { + // License should be accepted by the user to be able to setup Peer + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + License License `json:"license"` + + /* generic configs - images/resources/storage/servicetype/version/replicas */ + + // Images (Optional) lists the images to be used for peer's deployment + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Images *PeerImages `json:"images,omitempty"` + + // RegistryURL is registry url used to pull images + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + RegistryURL string `json:"registryURL,omitempty"` + + // ImagePullSecrets (Optional) is the list of ImagePullSecrets to be used for peer's deployment + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + ImagePullSecrets []string `json:"imagePullSecrets,omitempty"` + + // Replicas (Optional - default 1) is the number of peer replicas to be setup + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Replicas *int32 `json:"replicas,omitempty"` + + // Resources (Optional) is the amount of resources to be provided to peer deployment + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Resources *PeerResources `json:"resources,omitempty"` + + // Service (Optional) is the override object for peer's service + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Service *Service `json:"service,omitempty"` + + // Storage (Optional - uses default storageclass if not provided) is the override object for peer's PVC config + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Storage *PeerStorages `json:"storage,omitempty"` + + /* peer specific configs */ + // MSPID is the msp id of the peer + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + MSPID string `json:"mspID,omitempty"` + + // StateDb (Optional) is the statedb used for peer, can be couchdb or leveldb + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + StateDb string `json:"stateDb,omitempty"` + + // ConfigOverride (Optional) is the object to provide overrides to core yaml config + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + // +kubebuilder:validation:Type=object + // +kubebuilder:validation:Schemaless + // +kubebuilder:pruning:PreserveUnknownFields + ConfigOverride *runtime.RawExtension `json:"configoverride,omitempty"` + + // HSM (Optional) is DEPRECATED + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + HSM *HSM `json:"hsm,omitempty"` + + // DisableNodeOU (Optional) is used to switch nodeou on and off + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + DisableNodeOU *bool `json:"disablenodeou,omitempty"` + + // CustomNames (Optional) is to use pre-configured resources for peer's deployment + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + CustomNames PeerCustomNames `json:"customNames,omitempty"` + + // FabricVersion (Optional) is fabric version for the peer + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + FabricVersion string `json:"version"` + + // NumSecondsWarningPeriod (Optional - default 30 days) is used to define certificate expiry warning period. 
+ // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + NumSecondsWarningPeriod int64 `json:"numSecondsWarningPeriod,omitempty"` + + /* msp data can be passed in secret or in spec */ + // MSPSecret (Optional) is secret used to store msp crypto + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + MSPSecret string `json:"mspSecret,omitempty"` + + // Secret is object for msp crypto + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Secret *SecretSpec `json:"secret,omitempty"` + + /* proxy ip passed if not OCP, domain for OCP */ + // Domain is the sub-domain used for peer's deployment + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Domain string `json:"domain,omitempty"` + + // Ingress (Optional) is ingress object for ingress overrides + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Ingress Ingress `json:"ingress,omitempty"` + + // PeerExternalEndpoint (Optional) is used to override peer external endpoint + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + PeerExternalEndpoint string `json:"peerExternalEndpoint,omitempty"` + + /* cluster related configs */ + // Arch (Optional) is the architecture of the nodes where peer should be deployed + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Arch []string `json:"arch,omitempty"` + + // Region (Optional) is the region of the nodes where the peer should be deployed + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Region string `json:"region,omitempty"` + + // Zone (Optional) is the zone of the nodes where the peer should be deployed + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Zone string `json:"zone,omitempty"` + + /* advanced configs */ + // DindArgs (Optional) is used to override args passed to dind container + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + DindArgs []string `json:"dindArgs,omitempty"` + + // Action (Optional) is object for peer actions + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Action PeerAction `json:"action,omitempty"` + + // ChaincodeBuilderConfig (Optional) is a k/v map providing a scope for template + // substitutions defined in chaincode-as-a-service package metadata files. + // The map will be serialized as JSON and set in the peer deployment + // CHAINCODE_AS_A_SERVICE_BUILDER_CONFIG env variable. + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + ChaincodeBuilderConfig ChaincodeBuilderConfig `json:"chaincodeBuilderConfig,omitempty"` +} + +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=true +// IBPPeerStatus defines the observed state of IBPPeer +// +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true +type IBPPeerStatus struct { + CRStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:openapi-gen=true +// +kubebuilder:storageversion +// +k8s:deepcopy-gen=true +// +kubebuilder:subresource:status +// IBPPeer is the Schema for the ibppeers API. +// Warning: Peer deployment using this tile is not supported. Please use the IBP Console to deploy a Peer.
+// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +// +operator-sdk:gen-csv:customresourcedefinitions.displayName="IBP Peer" +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`Deployments,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`Ingresses,v1beta1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`PersistentVolumeClaim,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`Role,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`RoleBinding,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`Route,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`Services,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`ServiceAccounts,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`ConfigMaps,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`Secrets,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`Pods,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`Replicasets,v1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`IBPCA,v1beta1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`IBPPeer,v1beta1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`IBPOrderer,v1beta1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`IBPConsole,v1beta1,""` +// +operator-sdk:gen-csv:customresourcedefinitions.resources=`clusterversions,v1,""` +type IBPPeer struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec IBPPeerSpec `json:"spec"` + // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true + Status IBPPeerStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:deepcopy-gen=true +// IBPPeerList contains a list of IBPPeer +type IBPPeerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []IBPPeer `json:"items"` +} + +// +k8s:deepcopy-gen=true +// PeerResources is the overrides to the resources of the peer +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type PeerResources struct { + // Init (Optional) is the resources provided to the init container + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Init *corev1.ResourceRequirements `json:"init,omitempty"` + + /// Peer (Optional) is the resources provided to the peer container + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Peer *corev1.ResourceRequirements `json:"peer,omitempty"` + + // GRPCProxy (Optional) is the resources provided to the proxy container + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + GRPCProxy *corev1.ResourceRequirements `json:"proxy,omitempty"` + + // FluentD (Optional) is the resources provided to the fluentd container + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + FluentD *corev1.ResourceRequirements `json:"fluentd,omitempty"` + + // DinD (Optional) is the resources provided to the dind container + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + DinD *corev1.ResourceRequirements `json:"dind,omitempty"` + + // CouchDB (Optional) is the resources provided to the couchdb container + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + CouchDB *corev1.ResourceRequirements 
`json:"couchdb,omitempty"` + + // CCLauncher (Optional) is the resources provided to the cclauncher container + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + CCLauncher *corev1.ResourceRequirements `json:"chaincodelauncher,omitempty"` + + // Enroller (Optional) is the resources provided to the enroller container + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Enroller *corev1.ResourceRequirements `json:"enroller,omitempty"` + + // HSMDaemon (Optional) is the resources provided to the HSM Daemon container + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + HSMDaemon *corev1.ResourceRequirements `json:"hsmdaemon,omitempty"` +} + +// +k8s:deepcopy-gen=true +// PeerStorages is the overrides to the storage of the peer +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type PeerStorages struct { + // StateDB (Optional) is the configuration of the storage of the statedb + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + StateDB *StorageSpec `json:"statedb,omitempty"` + + // Peer (Optional) is the configuration of the storage of the peer + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Peer *StorageSpec `json:"peer,omitempty"` +} + +// PeerImages is the list of images to be used in peer deployment +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type PeerImages struct { + // PeerInitImage is the name of the peer init image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + PeerInitImage string `json:"peerInitImage,omitempty"` + + // PeerInitTag is the tag of the peer init image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + PeerInitTag string `json:"peerInitTag,omitempty"` + + // PeerImage is the name of the peer image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + PeerImage string `json:"peerImage,omitempty"` + + // PeerTag is the tag of the peer image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + PeerTag string `json:"peerTag,omitempty"` + + // DindImage is the name of the dind image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + DindImage string `json:"dindImage,omitempty"` + + // DindTag is the tag of the dind image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + DindTag string `json:"dindTag,omitempty"` + + // GRPCWebImage is the name of the grpc web proxy image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + GRPCWebImage string `json:"grpcwebImage,omitempty"` + + // GRPCWebTag is the tag of the grpc web proxy image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + GRPCWebTag string `json:"grpcwebTag,omitempty"` + + // FluentdImage is the name of the fluentd logger image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + FluentdImage string `json:"fluentdImage,omitempty"` + + // FluentdTag is the tag of the fluentd logger image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + FluentdTag string `json:"fluentdTag,omitempty"` + + // CouchDBImage is the name of the couchdb image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + CouchDBImage string `json:"couchdbImage,omitempty"` + + // CouchDBTag is the tag of the couchdb image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + CouchDBTag string 
`json:"couchdbTag,omitempty"` + + // CCLauncherImage is the name of the chaincode launcher image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + CCLauncherImage string `json:"chaincodeLauncherImage,omitempty"` + + // CCLauncherTag is the tag of the chaincode launcher image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + CCLauncherTag string `json:"chaincodeLauncherTag,omitempty"` + + // FileTransferImage is the name of the file transfer image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + FileTransferImage string `json:"fileTransferImage,omitempty"` + + // FileTransferTag is the tag of the file transfer image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + FileTransferTag string `json:"fileTransferTag,omitempty"` + + // BuilderImage is the name of the builder image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + BuilderImage string `json:"builderImage,omitempty"` + + // BuilderTag is the tag of the builder image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + BuilderTag string `json:"builderTag,omitempty"` + + // GoEnvImage is the name of the goenv image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + GoEnvImage string `json:"goEnvImage,omitempty"` + + // GoEnvTag is the tag of the goenv image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + GoEnvTag string `json:"goEnvTag,omitempty"` + + // JavaEnvImage is the name of the javaenv image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + JavaEnvImage string `json:"javaEnvImage,omitempty"` + + // JavaEnvTag is the tag of the javaenv image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + JavaEnvTag string `json:"javaEnvTag,omitempty"` + + // NodeEnvImage is the name of the nodeenv image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + NodeEnvImage string `json:"nodeEnvImage,omitempty"` + + // NodeEnvTag is the tag of the nodeenv image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + NodeEnvTag string `json:"nodeEnvTag,omitempty"` + + // HSMImage is the name of the hsm image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + HSMImage string `json:"hsmImage,omitempty"` + + // HSMTag is the tag of the hsm image + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + HSMTag string `json:"hsmTag,omitempty"` + + // EnrollerImage is the name of the init image for crypto generation + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + EnrollerImage string `json:"enrollerImage,omitempty"` + + // EnrollerTag is the tag of the init image for crypto generation + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + EnrollerTag string `json:"enrollerTag,omitempty"` +} + +// +k8s:deepcopy-gen=true +// PeerConnectionProfile provides necessary information to connect to the peer +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type PeerConnectionProfile struct { + // Endpoints is list of endpoints to communicate with the peer + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Endpoints PeerEndpoints `json:"endpoints"` + + // TLS is object with tls crypto material for peer + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + TLS *MSP `json:"tls"` + + // Component is object with 
ecert crypto material for peer + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Component *MSP `json:"component"` +} + +// PeerEndpoints is the list of endpoints to communicate with the peer +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type PeerEndpoints struct { + // API is the endpoint to communicate with peer's API + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + API string `json:"api"` + + // Operations is the endpoint to communicate with peer's Operations API + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Operations string `json:"operations"` + + // Grpcweb is the endpoint to communicate with peer's grpcweb proxy API + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Grpcweb string `json:"grpcweb"` +} + +// +k8s:deepcopy-gen=true +// PeerCustomNames is the list of preconfigured objects to be used for peer's deployment +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type PeerCustomNames struct { + // PVC is the list of PVC Names to be used for peer's deployment + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + PVC PeerPVCNames `json:"pvc,omitempty"` +} + +// PeerPVCNames is the list of PVC Names to be used for peer's deployment +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type PeerPVCNames struct { + // Peer is the pvc to be used as peer's storage + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Peer string `json:"peer,omitempty"` + + // StateDB is the pvc to be used as statedb's storage + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + StateDB string `json:"statedb,omitempty"` +} + +// PeerAction contains actions that can be performed on peer +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type PeerAction struct { + // Restart action is used to restart peer deployment + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Restart bool `json:"restart,omitempty"` + + // Reenroll contains actions for triggering crypto reenroll + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Reenroll PeerReenrollAction `json:"reenroll,omitempty"` + + // Enroll contains actions for triggering crypto enroll + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Enroll PeerEnrollAction `json:"enroll,omitempty"` + + // UpgradeDBs action is used to trigger peer node upgrade-dbs command + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + UpgradeDBs bool `json:"upgradedbs,omitempty"` +} + +// PeerReenrollAction contains actions for reenrolling crypto +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type PeerReenrollAction struct { + // Ecert is used to trigger reenroll for ecert + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Ecert bool `json:"ecert,omitempty"` + + // EcertNewKey is used to trigger reenroll for ecert and also generating + // a new private key + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + EcertNewKey bool `json:"ecertNewKey,omitempty"` + + // TLSCert is used to trigger reenroll for tlscert + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + TLSCert bool `json:"tlscert,omitempty"` + + // TLSCertNewKey is used to trigger reenroll for tlscert and also generating + // a new private key + //
+operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + TLSCertNewKey bool `json:"tlscertNewKey,omitempty"` +} + +// PeerEnrollAction contains actions for enrolling crypto +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type PeerEnrollAction struct { + // Ecert is used to trigger enroll for ecert + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + Ecert bool `json:"ecert,omitempty"` + + // TLSCert is used to trigger enroll for tlscert + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + TLSCert bool `json:"tlscert,omitempty"` +} + +// ChaincodeBuilderConfig defines a k/v mapping scope for template substitutions +// referenced within a chaincode package archive. The mapping is serialized as +// JSON and appended to the peer env as CHAINCODE_AS_A_SERVICE_BUILDER_CONFIG. +// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true +type ChaincodeBuilderConfig map[string]string diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 00000000..7a28711a --- /dev/null +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,1965 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta1 + +import ( + consolev1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/console/v1" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CAAction) DeepCopyInto(out *CAAction) { + *out = *in + out.Renew = in.Renew +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CAAction. +func (in *CAAction) DeepCopy() *CAAction { + if in == nil { + return nil + } + out := new(CAAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CAConnectionProfile) DeepCopyInto(out *CAConnectionProfile) { + *out = *in + out.Endpoints = in.Endpoints + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(ConnectionProfileTLS) + **out = **in + } + if in.CA != nil { + in, out := &in.CA, &out.CA + *out = new(MSP) + (*in).DeepCopyInto(*out) + } + if in.TLSCA != nil { + in, out := &in.TLSCA, &out.TLSCA + *out = new(MSP) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CAConnectionProfile.
+func (in *CAConnectionProfile) DeepCopy() *CAConnectionProfile { + if in == nil { + return nil + } + out := new(CAConnectionProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CACustomNames) DeepCopyInto(out *CACustomNames) { + *out = *in + out.PVC = in.PVC +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CACustomNames. +func (in *CACustomNames) DeepCopy() *CACustomNames { + if in == nil { + return nil + } + out := new(CACustomNames) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CAEndpoints) DeepCopyInto(out *CAEndpoints) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CAEndpoints. +func (in *CAEndpoints) DeepCopy() *CAEndpoints { + if in == nil { + return nil + } + out := new(CAEndpoints) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CAImages) DeepCopyInto(out *CAImages) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CAImages. +func (in *CAImages) DeepCopy() *CAImages { + if in == nil { + return nil + } + out := new(CAImages) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CAPVCNames) DeepCopyInto(out *CAPVCNames) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CAPVCNames. +func (in *CAPVCNames) DeepCopy() *CAPVCNames { + if in == nil { + return nil + } + out := new(CAPVCNames) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CAResources) DeepCopyInto(out *CAResources) { + *out = *in + if in.Init != nil { + in, out := &in.Init, &out.Init + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.CA != nil { + in, out := &in.CA, &out.CA + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.EnrollJob != nil { + in, out := &in.EnrollJob, &out.EnrollJob + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.HSMDaemon != nil { + in, out := &in.HSMDaemon, &out.HSMDaemon + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CAResources. +func (in *CAResources) DeepCopy() *CAResources { + if in == nil { + return nil + } + out := new(CAResources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CAStorages) DeepCopyInto(out *CAStorages) { + *out = *in + if in.CA != nil { + in, out := &in.CA, &out.CA + *out = new(StorageSpec) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CAStorages. +func (in *CAStorages) DeepCopy() *CAStorages { + if in == nil { + return nil + } + out := new(CAStorages) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *CATLS) DeepCopyInto(out *CATLS) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CATLS. +func (in *CATLS) DeepCopy() *CATLS { + if in == nil { + return nil + } + out := new(CATLS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CRN) DeepCopyInto(out *CRN) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CRN. +func (in *CRN) DeepCopy() *CRN { + if in == nil { + return nil + } + out := new(CRN) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CRStatus) DeepCopyInto(out *CRStatus) { + *out = *in + out.Versions = in.Versions +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CRStatus. +func (in *CRStatus) DeepCopy() *CRStatus { + if in == nil { + return nil + } + out := new(CRStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CRStatusVersion) DeepCopyInto(out *CRStatusVersion) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CRStatusVersion. +func (in *CRStatusVersion) DeepCopy() *CRStatusVersion { + if in == nil { + return nil + } + out := new(CRStatusVersion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSR) DeepCopyInto(out *CSR) { + *out = *in + if in.Hosts != nil { + in, out := &in.Hosts, &out.Hosts + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSR. +func (in *CSR) DeepCopy() *CSR { + if in == nil { + return nil + } + out := new(CSR) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ChaincodeBuilderConfig) DeepCopyInto(out *ChaincodeBuilderConfig) { + { + in := &in + *out = make(ChaincodeBuilderConfig, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChaincodeBuilderConfig. +func (in ChaincodeBuilderConfig) DeepCopy() ChaincodeBuilderConfig { + if in == nil { + return nil + } + out := new(ChaincodeBuilderConfig) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigOverride) DeepCopyInto(out *ConfigOverride) { + *out = *in + if in.CA != nil { + in, out := &in.CA, &out.CA + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + if in.TLSCA != nil { + in, out := &in.TLSCA, &out.TLSCA + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + if in.MaxNameLength != nil { + in, out := &in.MaxNameLength, &out.MaxNameLength + *out = new(int) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigOverride. 
+func (in *ConfigOverride) DeepCopy() *ConfigOverride { + if in == nil { + return nil + } + out := new(ConfigOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionProfileTLS) DeepCopyInto(out *ConnectionProfileTLS) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionProfileTLS. +func (in *ConnectionProfileTLS) DeepCopy() *ConnectionProfileTLS { + if in == nil { + return nil + } + out := new(ConnectionProfileTLS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleAction) DeepCopyInto(out *ConsoleAction) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleAction. +func (in *ConsoleAction) DeepCopy() *ConsoleAction { + if in == nil { + return nil + } + out := new(ConsoleAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleImages) DeepCopyInto(out *ConsoleImages) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleImages. +func (in *ConsoleImages) DeepCopy() *ConsoleImages { + if in == nil { + return nil + } + out := new(ConsoleImages) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleOverrides) DeepCopyInto(out *ConsoleOverrides) { + *out = *in + if in.Console != nil { + in, out := &in.Console, &out.Console + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + if in.Deployer != nil { + in, out := &in.Deployer, &out.Deployer + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + if in.MaxNameLength != nil { + in, out := &in.MaxNameLength, &out.MaxNameLength + *out = new(int) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleOverrides. +func (in *ConsoleOverrides) DeepCopy() *ConsoleOverrides { + if in == nil { + return nil + } + out := new(ConsoleOverrides) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleOverridesConsole) DeepCopyInto(out *ConsoleOverridesConsole) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleOverridesConsole. +func (in *ConsoleOverridesConsole) DeepCopy() *ConsoleOverridesConsole { + if in == nil { + return nil + } + out := new(ConsoleOverridesConsole) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleOverridesDeployer) DeepCopyInto(out *ConsoleOverridesDeployer) { + *out = *in + if in.Timeouts != nil { + in, out := &in.Timeouts, &out.Timeouts + *out = new(DeployerTimeouts) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleOverridesDeployer. 
+func (in *ConsoleOverridesDeployer) DeepCopy() *ConsoleOverridesDeployer { + if in == nil { + return nil + } + out := new(ConsoleOverridesDeployer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleResources) DeepCopyInto(out *ConsoleResources) { + *out = *in + if in.Init != nil { + in, out := &in.Init, &out.Init + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.CouchDB != nil { + in, out := &in.CouchDB, &out.CouchDB + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.Console != nil { + in, out := &in.Console, &out.Console + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.Deployer != nil { + in, out := &in.Deployer, &out.Deployer + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.Configtxlator != nil { + in, out := &in.Configtxlator, &out.Configtxlator + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleResources. +func (in *ConsoleResources) DeepCopy() *ConsoleResources { + if in == nil { + return nil + } + out := new(ConsoleResources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleStorage) DeepCopyInto(out *ConsoleStorage) { + *out = *in + if in.Console != nil { + in, out := &in.Console, &out.Console + *out = new(StorageSpec) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleStorage. +func (in *ConsoleStorage) DeepCopy() *ConsoleStorage { + if in == nil { + return nil + } + out := new(ConsoleStorage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Deployer) DeepCopyInto(out *Deployer) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Deployer. +func (in *Deployer) DeepCopy() *Deployer { + if in == nil { + return nil + } + out := new(Deployer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeployerTimeouts) DeepCopyInto(out *DeployerTimeouts) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeployerTimeouts. +func (in *DeployerTimeouts) DeepCopy() *DeployerTimeouts { + if in == nil { + return nil + } + out := new(DeployerTimeouts) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Enrollment) DeepCopyInto(out *Enrollment) { + *out = *in + if in.CATLS != nil { + in, out := &in.CATLS, &out.CATLS + *out = new(CATLS) + **out = **in + } + if in.AdminCerts != nil { + in, out := &in.AdminCerts, &out.AdminCerts + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.CSR != nil { + in, out := &in.CSR, &out.CSR + *out = new(CSR) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Enrollment. 
+func (in *Enrollment) DeepCopy() *Enrollment { + if in == nil { + return nil + } + out := new(Enrollment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnrollmentSpec) DeepCopyInto(out *EnrollmentSpec) { + *out = *in + if in.Component != nil { + in, out := &in.Component, &out.Component + *out = new(Enrollment) + (*in).DeepCopyInto(*out) + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(Enrollment) + (*in).DeepCopyInto(*out) + } + if in.ClientAuth != nil { + in, out := &in.ClientAuth, &out.ClientAuth + *out = new(Enrollment) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnrollmentSpec. +func (in *EnrollmentSpec) DeepCopy() *EnrollmentSpec { + if in == nil { + return nil + } + out := new(EnrollmentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HSM) DeepCopyInto(out *HSM) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HSM. +func (in *HSM) DeepCopy() *HSM { + if in == nil { + return nil + } + out := new(HSM) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBPCA) DeepCopyInto(out *IBPCA) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBPCA. +func (in *IBPCA) DeepCopy() *IBPCA { + if in == nil { + return nil + } + out := new(IBPCA) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IBPCA) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBPCAList) DeepCopyInto(out *IBPCAList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IBPCA, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBPCAList. +func (in *IBPCAList) DeepCopy() *IBPCAList { + if in == nil { + return nil + } + out := new(IBPCAList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IBPCAList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IBPCASpec) DeepCopyInto(out *IBPCASpec) { + *out = *in + out.License = in.License + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = new(CAImages) + **out = **in + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(CAResources) + (*in).DeepCopyInto(*out) + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(Service) + **out = **in + } + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = new(CAStorages) + (*in).DeepCopyInto(*out) + } + if in.ConfigOverride != nil { + in, out := &in.ConfigOverride, &out.ConfigOverride + *out = new(ConfigOverride) + (*in).DeepCopyInto(*out) + } + if in.HSM != nil { + in, out := &in.HSM, &out.HSM + *out = new(HSM) + **out = **in + } + out.CustomNames = in.CustomNames + out.Ingress = in.Ingress + if in.Arch != nil { + in, out := &in.Arch, &out.Arch + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.Action = in.Action +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBPCASpec. +func (in *IBPCASpec) DeepCopy() *IBPCASpec { + if in == nil { + return nil + } + out := new(IBPCASpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBPCAStatus) DeepCopyInto(out *IBPCAStatus) { + *out = *in + out.CRStatus = in.CRStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBPCAStatus. +func (in *IBPCAStatus) DeepCopy() *IBPCAStatus { + if in == nil { + return nil + } + out := new(IBPCAStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBPConsole) DeepCopyInto(out *IBPConsole) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBPConsole. +func (in *IBPConsole) DeepCopy() *IBPConsole { + if in == nil { + return nil + } + out := new(IBPConsole) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IBPConsole) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBPConsoleList) DeepCopyInto(out *IBPConsoleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IBPConsole, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBPConsoleList. 
+func (in *IBPConsoleList) DeepCopy() *IBPConsoleList { + if in == nil { + return nil + } + out := new(IBPConsoleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IBPConsoleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBPConsoleSpec) DeepCopyInto(out *IBPConsoleSpec) { + *out = *in + out.License = in.License + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = new(ConsoleImages) + **out = **in + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(ConsoleResources) + (*in).DeepCopyInto(*out) + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(Service) + **out = **in + } + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = new(ConsoleStorage) + (*in).DeepCopyInto(*out) + } + if in.NetworkInfo != nil { + in, out := &in.NetworkInfo, &out.NetworkInfo + *out = new(NetworkInfo) + **out = **in + } + out.Ingress = in.Ingress + if in.ClusterData != nil { + in, out := &in.ClusterData, &out.ClusterData + *out = new(consolev1.IBPConsoleClusterData) + (*in).DeepCopyInto(*out) + } + if in.FeatureFlags != nil { + in, out := &in.FeatureFlags, &out.FeatureFlags + *out = new(consolev1.FeatureFlags) + (*in).DeepCopyInto(*out) + } + if in.IBMID != nil { + in, out := &in.IBMID, &out.IBMID + *out = new(consolev1.IBMID) + **out = **in + } + if in.Proxying != nil { + in, out := &in.Proxying, &out.Proxying + *out = new(bool) + **out = **in + } + if in.CRN != nil { + in, out := &in.CRN, &out.CRN + *out = new(CRN) + **out = **in + } + if in.Kubeconfig != nil { + in, out := &in.Kubeconfig, &out.Kubeconfig + *out = new([]byte) + if **in != nil { + in, out := *in, *out + *out = make([]byte, len(*in)) + copy(*out, *in) + } + } + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = new(Versions) + (*in).DeepCopyInto(*out) + } + if in.Deployer != nil { + in, out := &in.Deployer, &out.Deployer + *out = new(Deployer) + **out = **in + } + if in.Arch != nil { + in, out := &in.Arch, &out.Arch + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ConfigOverride != nil { + in, out := &in.ConfigOverride, &out.ConfigOverride + *out = new(ConsoleOverrides) + (*in).DeepCopyInto(*out) + } + out.Action = in.Action + if in.UseTags != nil { + in, out := &in.UseTags, &out.UseTags + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBPConsoleSpec. +func (in *IBPConsoleSpec) DeepCopy() *IBPConsoleSpec { + if in == nil { + return nil + } + out := new(IBPConsoleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBPConsoleStatus) DeepCopyInto(out *IBPConsoleStatus) { + *out = *in + out.CRStatus = in.CRStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBPConsoleStatus. 
+func (in *IBPConsoleStatus) DeepCopy() *IBPConsoleStatus { + if in == nil { + return nil + } + out := new(IBPConsoleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBPOrderer) DeepCopyInto(out *IBPOrderer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBPOrderer. +func (in *IBPOrderer) DeepCopy() *IBPOrderer { + if in == nil { + return nil + } + out := new(IBPOrderer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IBPOrderer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBPOrdererClusterLocation) DeepCopyInto(out *IBPOrdererClusterLocation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBPOrdererClusterLocation. +func (in *IBPOrdererClusterLocation) DeepCopy() *IBPOrdererClusterLocation { + if in == nil { + return nil + } + out := new(IBPOrdererClusterLocation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBPOrdererList) DeepCopyInto(out *IBPOrdererList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IBPOrderer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBPOrdererList. +func (in *IBPOrdererList) DeepCopy() *IBPOrdererList { + if in == nil { + return nil + } + out := new(IBPOrdererList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IBPOrdererList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IBPOrdererSpec) DeepCopyInto(out *IBPOrdererSpec) { + *out = *in + out.License = in.License + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = new(OrdererImages) + **out = **in + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(OrdererResources) + (*in).DeepCopyInto(*out) + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(Service) + **out = **in + } + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = new(OrdererStorages) + (*in).DeepCopyInto(*out) + } + if in.UseChannelLess != nil { + in, out := &in.UseChannelLess, &out.UseChannelLess + *out = new(bool) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(SecretSpec) + (*in).DeepCopyInto(*out) + } + if in.ConfigOverride != nil { + in, out := &in.ConfigOverride, &out.ConfigOverride + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + if in.HSM != nil { + in, out := &in.HSM, &out.HSM + *out = new(HSM) + **out = **in + } + if in.IsPrecreate != nil { + in, out := &in.IsPrecreate, &out.IsPrecreate + *out = new(bool) + **out = **in + } + if in.ClusterLocation != nil { + in, out := &in.ClusterLocation, &out.ClusterLocation + *out = make([]IBPOrdererClusterLocation, len(*in)) + copy(*out, *in) + } + if in.ClusterConfigOverride != nil { + in, out := &in.ClusterConfigOverride, &out.ClusterConfigOverride + *out = make([]*runtime.RawExtension, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + } + } + if in.ClusterSecret != nil { + in, out := &in.ClusterSecret, &out.ClusterSecret + *out = make([]*SecretSpec, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(SecretSpec) + (*in).DeepCopyInto(*out) + } + } + } + if in.NodeNumber != nil { + in, out := &in.NodeNumber, &out.NodeNumber + *out = new(int) + **out = **in + } + out.Ingress = in.Ingress + if in.Arch != nil { + in, out := &in.Arch, &out.Arch + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DisableNodeOU != nil { + in, out := &in.DisableNodeOU, &out.DisableNodeOU + *out = new(bool) + **out = **in + } + out.CustomNames = in.CustomNames + out.Action = in.Action +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBPOrdererSpec. +func (in *IBPOrdererSpec) DeepCopy() *IBPOrdererSpec { + if in == nil { + return nil + } + out := new(IBPOrdererSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBPOrdererStatus) DeepCopyInto(out *IBPOrdererStatus) { + *out = *in + out.CRStatus = in.CRStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBPOrdererStatus. +func (in *IBPOrdererStatus) DeepCopy() *IBPOrdererStatus { + if in == nil { + return nil + } + out := new(IBPOrdererStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IBPPeer) DeepCopyInto(out *IBPPeer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBPPeer. +func (in *IBPPeer) DeepCopy() *IBPPeer { + if in == nil { + return nil + } + out := new(IBPPeer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IBPPeer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBPPeerList) DeepCopyInto(out *IBPPeerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IBPPeer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBPPeerList. +func (in *IBPPeerList) DeepCopy() *IBPPeerList { + if in == nil { + return nil + } + out := new(IBPPeerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IBPPeerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBPPeerSpec) DeepCopyInto(out *IBPPeerSpec) { + *out = *in + out.License = in.License + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = new(PeerImages) + **out = **in + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(PeerResources) + (*in).DeepCopyInto(*out) + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(Service) + **out = **in + } + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = new(PeerStorages) + (*in).DeepCopyInto(*out) + } + if in.ConfigOverride != nil { + in, out := &in.ConfigOverride, &out.ConfigOverride + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + if in.HSM != nil { + in, out := &in.HSM, &out.HSM + *out = new(HSM) + **out = **in + } + if in.DisableNodeOU != nil { + in, out := &in.DisableNodeOU, &out.DisableNodeOU + *out = new(bool) + **out = **in + } + out.CustomNames = in.CustomNames + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(SecretSpec) + (*in).DeepCopyInto(*out) + } + out.Ingress = in.Ingress + if in.Arch != nil { + in, out := &in.Arch, &out.Arch + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DindArgs != nil { + in, out := &in.DindArgs, &out.DindArgs + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.Action = in.Action + if in.ChaincodeBuilderConfig != nil { + in, out := &in.ChaincodeBuilderConfig, &out.ChaincodeBuilderConfig + *out = make(ChaincodeBuilderConfig, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an 
autogenerated deepcopy function, copying the receiver, creating a new IBPPeerSpec. +func (in *IBPPeerSpec) DeepCopy() *IBPPeerSpec { + if in == nil { + return nil + } + out := new(IBPPeerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBPPeerStatus) DeepCopyInto(out *IBPPeerStatus) { + *out = *in + out.CRStatus = in.CRStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBPPeerStatus. +func (in *IBPPeerStatus) DeepCopy() *IBPPeerStatus { + if in == nil { + return nil + } + out := new(IBPPeerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Ingress) DeepCopyInto(out *Ingress) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ingress. +func (in *Ingress) DeepCopy() *Ingress { + if in == nil { + return nil + } + out := new(Ingress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *License) DeepCopyInto(out *License) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new License. +func (in *License) DeepCopy() *License { + if in == nil { + return nil + } + out := new(License) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSP) DeepCopyInto(out *MSP) { + *out = *in + if in.CACerts != nil { + in, out := &in.CACerts, &out.CACerts + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IntermediateCerts != nil { + in, out := &in.IntermediateCerts, &out.IntermediateCerts + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AdminCerts != nil { + in, out := &in.AdminCerts, &out.AdminCerts + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSP. +func (in *MSP) DeepCopy() *MSP { + if in == nil { + return nil + } + out := new(MSP) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSPSpec) DeepCopyInto(out *MSPSpec) { + *out = *in + if in.Component != nil { + in, out := &in.Component, &out.Component + *out = new(MSP) + (*in).DeepCopyInto(*out) + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(MSP) + (*in).DeepCopyInto(*out) + } + if in.ClientAuth != nil { + in, out := &in.ClientAuth, &out.ClientAuth + *out = new(MSP) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSPSpec. +func (in *MSPSpec) DeepCopy() *MSPSpec { + if in == nil { + return nil + } + out := new(MSPSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkInfo) DeepCopyInto(out *NetworkInfo) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInfo. 
+func (in *NetworkInfo) DeepCopy() *NetworkInfo { + if in == nil { + return nil + } + out := new(NetworkInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrdererAction) DeepCopyInto(out *OrdererAction) { + *out = *in + out.Reenroll = in.Reenroll + out.Enroll = in.Enroll +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrdererAction. +func (in *OrdererAction) DeepCopy() *OrdererAction { + if in == nil { + return nil + } + out := new(OrdererAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrdererConnectionProfile) DeepCopyInto(out *OrdererConnectionProfile) { + *out = *in + out.Endpoints = in.Endpoints + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(MSP) + (*in).DeepCopyInto(*out) + } + if in.Component != nil { + in, out := &in.Component, &out.Component + *out = new(MSP) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrdererConnectionProfile. +func (in *OrdererConnectionProfile) DeepCopy() *OrdererConnectionProfile { + if in == nil { + return nil + } + out := new(OrdererConnectionProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrdererCustomNames) DeepCopyInto(out *OrdererCustomNames) { + *out = *in + out.PVC = in.PVC +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrdererCustomNames. +func (in *OrdererCustomNames) DeepCopy() *OrdererCustomNames { + if in == nil { + return nil + } + out := new(OrdererCustomNames) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrdererEndpoints) DeepCopyInto(out *OrdererEndpoints) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrdererEndpoints. +func (in *OrdererEndpoints) DeepCopy() *OrdererEndpoints { + if in == nil { + return nil + } + out := new(OrdererEndpoints) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrdererEnrollAction) DeepCopyInto(out *OrdererEnrollAction) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrdererEnrollAction. +func (in *OrdererEnrollAction) DeepCopy() *OrdererEnrollAction { + if in == nil { + return nil + } + out := new(OrdererEnrollAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrdererImages) DeepCopyInto(out *OrdererImages) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrdererImages. +func (in *OrdererImages) DeepCopy() *OrdererImages { + if in == nil { + return nil + } + out := new(OrdererImages) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrdererPVCNames) DeepCopyInto(out *OrdererPVCNames) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrdererPVCNames. +func (in *OrdererPVCNames) DeepCopy() *OrdererPVCNames { + if in == nil { + return nil + } + out := new(OrdererPVCNames) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrdererReenrollAction) DeepCopyInto(out *OrdererReenrollAction) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrdererReenrollAction. +func (in *OrdererReenrollAction) DeepCopy() *OrdererReenrollAction { + if in == nil { + return nil + } + out := new(OrdererReenrollAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrdererResources) DeepCopyInto(out *OrdererResources) { + *out = *in + if in.Init != nil { + in, out := &in.Init, &out.Init + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.Orderer != nil { + in, out := &in.Orderer, &out.Orderer + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.GRPCProxy != nil { + in, out := &in.GRPCProxy, &out.GRPCProxy + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.Enroller != nil { + in, out := &in.Enroller, &out.Enroller + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.HSMDaemon != nil { + in, out := &in.HSMDaemon, &out.HSMDaemon + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrdererResources. +func (in *OrdererResources) DeepCopy() *OrdererResources { + if in == nil { + return nil + } + out := new(OrdererResources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrdererStorages) DeepCopyInto(out *OrdererStorages) { + *out = *in + if in.Orderer != nil { + in, out := &in.Orderer, &out.Orderer + *out = new(StorageSpec) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrdererStorages. +func (in *OrdererStorages) DeepCopy() *OrdererStorages { + if in == nil { + return nil + } + out := new(OrdererStorages) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PeerAction) DeepCopyInto(out *PeerAction) { + *out = *in + out.Reenroll = in.Reenroll + out.Enroll = in.Enroll +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PeerAction. +func (in *PeerAction) DeepCopy() *PeerAction { + if in == nil { + return nil + } + out := new(PeerAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PeerConnectionProfile) DeepCopyInto(out *PeerConnectionProfile) { + *out = *in + out.Endpoints = in.Endpoints + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(MSP) + (*in).DeepCopyInto(*out) + } + if in.Component != nil { + in, out := &in.Component, &out.Component + *out = new(MSP) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PeerConnectionProfile. +func (in *PeerConnectionProfile) DeepCopy() *PeerConnectionProfile { + if in == nil { + return nil + } + out := new(PeerConnectionProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PeerCustomNames) DeepCopyInto(out *PeerCustomNames) { + *out = *in + out.PVC = in.PVC +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PeerCustomNames. +func (in *PeerCustomNames) DeepCopy() *PeerCustomNames { + if in == nil { + return nil + } + out := new(PeerCustomNames) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PeerEndpoints) DeepCopyInto(out *PeerEndpoints) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PeerEndpoints. +func (in *PeerEndpoints) DeepCopy() *PeerEndpoints { + if in == nil { + return nil + } + out := new(PeerEndpoints) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PeerEnrollAction) DeepCopyInto(out *PeerEnrollAction) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PeerEnrollAction. +func (in *PeerEnrollAction) DeepCopy() *PeerEnrollAction { + if in == nil { + return nil + } + out := new(PeerEnrollAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PeerImages) DeepCopyInto(out *PeerImages) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PeerImages. +func (in *PeerImages) DeepCopy() *PeerImages { + if in == nil { + return nil + } + out := new(PeerImages) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PeerPVCNames) DeepCopyInto(out *PeerPVCNames) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PeerPVCNames. +func (in *PeerPVCNames) DeepCopy() *PeerPVCNames { + if in == nil { + return nil + } + out := new(PeerPVCNames) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PeerReenrollAction) DeepCopyInto(out *PeerReenrollAction) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PeerReenrollAction. +func (in *PeerReenrollAction) DeepCopy() *PeerReenrollAction { + if in == nil { + return nil + } + out := new(PeerReenrollAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PeerResources) DeepCopyInto(out *PeerResources) { + *out = *in + if in.Init != nil { + in, out := &in.Init, &out.Init + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.Peer != nil { + in, out := &in.Peer, &out.Peer + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.GRPCProxy != nil { + in, out := &in.GRPCProxy, &out.GRPCProxy + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.FluentD != nil { + in, out := &in.FluentD, &out.FluentD + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.DinD != nil { + in, out := &in.DinD, &out.DinD + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.CouchDB != nil { + in, out := &in.CouchDB, &out.CouchDB + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.CCLauncher != nil { + in, out := &in.CCLauncher, &out.CCLauncher + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.Enroller != nil { + in, out := &in.Enroller, &out.Enroller + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.HSMDaemon != nil { + in, out := &in.HSMDaemon, &out.HSMDaemon + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PeerResources. +func (in *PeerResources) DeepCopy() *PeerResources { + if in == nil { + return nil + } + out := new(PeerResources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PeerStorages) DeepCopyInto(out *PeerStorages) { + *out = *in + if in.StateDB != nil { + in, out := &in.StateDB, &out.StateDB + *out = new(StorageSpec) + **out = **in + } + if in.Peer != nil { + in, out := &in.Peer, &out.Peer + *out = new(StorageSpec) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PeerStorages. +func (in *PeerStorages) DeepCopy() *PeerStorages { + if in == nil { + return nil + } + out := new(PeerStorages) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Renew) DeepCopyInto(out *Renew) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Renew. +func (in *Renew) DeepCopy() *Renew { + if in == nil { + return nil + } + out := new(Renew) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretSpec) DeepCopyInto(out *SecretSpec) { + *out = *in + if in.Enrollment != nil { + in, out := &in.Enrollment, &out.Enrollment + *out = new(EnrollmentSpec) + (*in).DeepCopyInto(*out) + } + if in.MSP != nil { + in, out := &in.MSP, &out.MSP + *out = new(MSPSpec) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretSpec. +func (in *SecretSpec) DeepCopy() *SecretSpec { + if in == nil { + return nil + } + out := new(SecretSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Service) DeepCopyInto(out *Service) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Service. +func (in *Service) DeepCopy() *Service { + if in == nil { + return nil + } + out := new(Service) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageSpec) DeepCopyInto(out *StorageSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageSpec. +func (in *StorageSpec) DeepCopy() *StorageSpec { + if in == nil { + return nil + } + out := new(StorageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VersionCA) DeepCopyInto(out *VersionCA) { + *out = *in + out.Image = in.Image +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VersionCA. +func (in *VersionCA) DeepCopy() *VersionCA { + if in == nil { + return nil + } + out := new(VersionCA) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VersionOrderer) DeepCopyInto(out *VersionOrderer) { + *out = *in + out.Image = in.Image +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VersionOrderer. +func (in *VersionOrderer) DeepCopy() *VersionOrderer { + if in == nil { + return nil + } + out := new(VersionOrderer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VersionPeer) DeepCopyInto(out *VersionPeer) { + *out = *in + out.Image = in.Image +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VersionPeer. +func (in *VersionPeer) DeepCopy() *VersionPeer { + if in == nil { + return nil + } + out := new(VersionPeer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Versions) DeepCopyInto(out *Versions) { + *out = *in + if in.CA != nil { + in, out := &in.CA, &out.CA + *out = make(map[string]VersionCA, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Peer != nil { + in, out := &in.Peer, &out.Peer + *out = make(map[string]VersionPeer, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Orderer != nil { + in, out := &in.Orderer, &out.Orderer + *out = make(map[string]VersionOrderer, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Versions. +func (in *Versions) DeepCopy() *Versions { + if in == nil { + return nil + } + out := new(Versions) + in.DeepCopyInto(out) + return out +} diff --git a/boilerplate/boilerplate.go.txt b/boilerplate/boilerplate.go.txt new file mode 100644 index 00000000..5852d2f1 --- /dev/null +++ b/boilerplate/boilerplate.go.txt @@ -0,0 +1,17 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ diff --git a/boilerplate/boilerplate.sh.txt b/boilerplate/boilerplate.sh.txt new file mode 100644 index 00000000..bb8d1b74 --- /dev/null +++ b/boilerplate/boilerplate.sh.txt @@ -0,0 +1,17 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# \ No newline at end of file diff --git a/build/entrypoint b/build/entrypoint new file mode 100755 index 00000000..18f0352e --- /dev/null +++ b/build/entrypoint @@ -0,0 +1,31 @@ +#!/bin/sh -e + +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is documented here: +# https://docs.openshift.com/container-platform/3.11/creating_images/guidelines.html#openshift-specific-guidelines + +# TODO +if ! whoami &>/dev/null; then + if [ -w /etc/passwd ]; then + echo "${USER_NAME:-ibp-operator}:x:$(id -u):$(id -g):${USER_NAME:-ibp-operator} user:${HOME}:/sbin/nologin" >> /etc/passwd + fi +fi + +exec ${OPERATOR} $@ \ No newline at end of file diff --git a/build/user_setup b/build/user_setup new file mode 100755 index 00000000..d2f7719e --- /dev/null +++ b/build/user_setup @@ -0,0 +1,43 @@ +#!/bin/sh + +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +set -x + +# ensure $HOME exists and is accessible by group 0 (we don't know what the runtime UID will be) +mkdir -p ${HOME} +chown ${USER_UID}:0 ${HOME} +mkdir -p /shared +chown ${USER_UID}:0 /shared +mkdir -p /data +chown ${USER_UID}:0 /data +mkdir -p /crypto +chown ${USER_UID}:0 /crypto +# TODO: Should consolidate all init related artifacts into a single directory +mkdir -p /peerinit +chown ${USER_UID}:0 /peerinit +mkdir -p /ordererinit +chown ${USER_UID}:0 /ordererinit +chmod ug+rwx ${HOME} + +# runtime user will need to be able to self-insert in /etc/passwd +chmod g+rw /etc/passwd + +# no need for this script to remain in the image after running +rm $0 \ No newline at end of file diff --git a/bundle.Dockerfile b/bundle.Dockerfile new file mode 100644 index 00000000..ebbe8fa3 --- /dev/null +++ b/bundle.Dockerfile @@ -0,0 +1,20 @@ +FROM scratch + +# Core bundle labels. +LABEL operators.operatorframework.io.bundle.mediatype.v1=registry+v1 +LABEL operators.operatorframework.io.bundle.manifests.v1=manifests/ +LABEL operators.operatorframework.io.bundle.metadata.v1=metadata/ +LABEL operators.operatorframework.io.bundle.package.v1=fabric-opensource-operator +LABEL operators.operatorframework.io.bundle.channels.v1=alpha +LABEL operators.operatorframework.io.metrics.builder=operator-sdk-v1.19.0+git +LABEL operators.operatorframework.io.metrics.mediatype.v1=metrics+v1 +LABEL operators.operatorframework.io.metrics.project_layout=go.kubebuilder.io/v3 + +# Labels for testing. +LABEL operators.operatorframework.io.test.mediatype.v1=scorecard+v1 +LABEL operators.operatorframework.io.test.config.v1=tests/scorecard/ + +# Copy files to locations specified by labels. +COPY bundle/manifests /manifests/ +COPY bundle/metadata /metadata/ +COPY bundle/tests/scorecard /tests/scorecard/ diff --git a/bundle/manifests/fabric-opensource-operator.clusterserviceversion.yaml b/bundle/manifests/fabric-opensource-operator.clusterserviceversion.yaml new file mode 100644 index 00000000..cb2bdd3a --- /dev/null +++ b/bundle/manifests/fabric-opensource-operator.clusterserviceversion.yaml @@ -0,0 +1,1889 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: ClusterServiceVersion +metadata: + annotations: + alm-examples: '[]' + capabilities: Seamless Upgrades + categories: Database + certified: "true" + containerImage: todo:update + createdAt: "2020-07-14T00:00:00Z" + description: TODO + operators.operatorframework.io/builder: operator-sdk-v1.19.0+git + operators.operatorframework.io/internal-objects: '["ibpcas.ibp.com","ibppeers.ibp.com","ibporderers.ibp.com"]' + operators.operatorframework.io/project_layout: go.kubebuilder.io/v3 + repository: "" + name: fabric-opensource-operator.v1.0.0 + namespace: placeholder +spec: + apiservicedefinitions: {} + customresourcedefinitions: + owned: + - description: Certificate Authorities issue certificates for all the identities + to transact on the network.
+ displayName: Hyperledger Fabric CA + kind: IBPCA + name: ibpcas.ibp.com + resources: + - kind: ConfigMaps + name: "" + version: v1 + - kind: Deployments + name: "" + version: v1 + - kind: IBPCA + name: "" + version: v1beta1 + - kind: IBPConsole + name: "" + version: v1beta1 + - kind: IBPOrderer + name: "" + version: v1beta1 + - kind: IBPPeer + name: "" + version: v1beta1 + - kind: Ingresses + name: "" + version: v1beta1 + - kind: PersistentVolumeClaim + name: "" + version: v1 + - kind: Pods + name: "" + version: v1 + - kind: Replicasets + name: "" + version: v1 + - kind: Role + name: "" + version: v1 + - kind: RoleBinding + name: "" + version: v1 + - kind: Route + name: "" + version: v1 + - kind: Secrets + name: "" + version: v1 + - kind: ServiceAccounts + name: "" + version: v1 + - kind: Services + name: "" + version: v1 + specDescriptors: + - description: Action (Optional) is action object for triggering actions + displayName: Action + path: action + - description: Renew action is object for certificate renewals + displayName: Renew + path: action.renew + - description: TLSCert action is used to renew TLS crypto for CA server + displayName: TLSCert + path: action.renew.tlscert + - description: Restart action is used to restart the running CA + displayName: Restart + path: action.restart + - description: Arch (Optional) is the architecture of the nodes where CA should + be deployed + displayName: Arch + path: arch + - description: ConfigOverride (Optional) is the object to provide overrides + to CA & TLSCA config + displayName: Config Override + path: configoverride + - description: CA (Optional) is the overrides to CA's configuration + displayName: CA + path: configoverride.ca + - description: MaxNameLength (Optional) is the maximum length of the name that + the CA can have + displayName: Max Name Length + path: configoverride.maxnamelength + - description: TLSCA (Optional) is the overrides to TLSCA's configuration + displayName: TLSCA + path: configoverride.tlsca + - description: CustomNames (Optional) is to use pre-configured resources for + CA's deployment + displayName: Custom Names + path: customNames + - description: PVC is the list of PVC Names to be used for CA's deployment + displayName: PVC + path: customNames.pvc + - description: CA is the pvc to be used as CA's storage + displayName: CA + path: customNames.pvc.ca + - description: Sqlite is the sqlite path to be used for CA's deployment + displayName: Sqlite + path: customNames.sqlitepath + - description: Domain is the sub-domain used for CA's deployment + displayName: Domain + path: domain + - description: HSM (Optional) is the parameters for the HSM if being used + displayName: HSM + path: hsm + - description: PKCS11Endpoint is the endpoint for the pkcs11 proxy + displayName: PKCS11 Endpoint + path: hsm.pkcs11endpoint + - description: ImagePullSecrets (Optional) is the list of ImagePullSecrets to + be used for CA's deployment + displayName: Image Pull Secrets + path: imagePullSecrets + - description: Images (Optional) lists the images to be used for CA's deployment + displayName: Images + path: images + - description: CAImage is the name of the CA image + displayName: CAImage + path: images.caImage + - description: CAInitImage is the name of the Init image + displayName: CAInit Image + path: images.caInitImage + - description: CAInitTag is the tag of the Init image + displayName: CAInit Tag + path: images.caInitTag + - description: CATag is the tag of the CA image + displayName: CATag + path: images.caTag + - description:
EnrollerImage is the name of the init image for crypto generation + displayName: Enroller Image + path: images.enrollerImage + - description: EnrollerTag is the tag of the init image for crypto generation + displayName: Enroller Tag + path: images.enrollerTag + - description: HSMImage is the name of the HSM image + displayName: HSMImage + path: images.hsmImage + - description: HSMTag is the tag of the HSM image + displayName: HSMTag + path: images.hsmTag + - description: Ingress (Optional) is ingress object for ingress overrides + displayName: Ingress + path: ingress + - description: Class (Optional) is the class to set for ingress + displayName: Class + path: ingress.class + - description: TlsSecretName (Optional) is the secret name to be used for tls + certificates + displayName: Tls Secret Name + path: ingress.tlsSecretName + - description: License should be accepted by the user to be able to setup CA + displayName: License + path: license + - description: Accept should be set to true to accept the license. + displayName: Accept + path: license.accept + value: + - false + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:checkbox + - description: NumSecondsWarningPeriod (Optional - default 30 days) is used + to define certificate expiry warning period. + displayName: Num Seconds Warning Period + path: numSecondsWarningPeriod + - description: Region (Optional) is the region of the nodes where the CA should + be deployed + displayName: Region + path: region + - description: RegistryURL is registry url used to pull images + displayName: Registry URL + path: registryURL + - description: Replicas (Optional - default 1) is the number of CA replicas + to be setup + displayName: Replicas + path: replicas + - description: Resources (Optional) is the amount of resources to be provided + to CA deployment + displayName: Resources + path: resources + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: CA is the resources provided to the CA container + displayName: CA + path: resources.ca + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: EnrollJob is the resources provided to the enroll job container + displayName: Enroll Job + path: resources.enrollJob + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: HSMDaemon is the resources provided to the HSM daemon container + displayName: HSMDaemon + path: resources.hsmDaemon + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: Init is the resources provided to the init container + displayName: Init + path: resources.init + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: Service (Optional) is the override object for CA's service + displayName: Service + path: service + - description: The "type" of the service to be used + displayName: Type + path: service.type + - description: Storage (Optional - uses default storageclass if not provided) + is the override object for CA's PVC config + displayName: Storage + path: storage + - description: CA is the configuration of the storage of the CA + displayName: CA + path: storage.ca + - description: Class is the storage class + displayName: Class + path: storage.ca.class + - description: Size of storage + displayName: Size + path: storage.ca.size + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podCount + - description: FabricVersion (Optional) sets the fabric version you want to use.
+ displayName: Fabric Version + path: version + - description: Zone (Optional) is the zone of the nodes where the CA should + be deployed + displayName: Zone + path: zone + statusDescriptors: + - description: ErrorCode is the code of classification of errors + displayName: Error Code + path: errorcode + - description: LastHeartbeatTime is when the controller reconciled this component + displayName: Last Heartbeat Time + path: lastHeartbeatTime + - description: Message provides a message for the status to be shown to customer + displayName: Message + path: message + - description: Reason provides a reason for an error + displayName: Reason + path: reason + x-descriptors: + - urn:alm:descriptor:io.kubernetes.phase:reason + - description: Status is defined based on the current status of the component + displayName: Status + path: status + - description: Type is true or false based on if status is valid + displayName: Type + path: type + - description: Version is the product version of the component + displayName: Version + path: version + - description: Versions is the operand version of the component + displayName: Versions + path: versions + version: v1beta1 + - description: The Console is used to deploy and manage the CA, peer, ordering + nodes. + displayName: Fabric Operations Console + kind: IBPConsole + name: ibpconsoles.ibp.com + resources: + - kind: ConfigMaps + name: "" + version: v1 + - kind: Deployments + name: "" + version: v1 + - kind: IBPCA + name: "" + version: v1beta1 + - kind: IBPConsole + name: "" + version: v1beta1 + - kind: IBPOrderer + name: "" + version: v1beta1 + - kind: IBPPeer + name: "" + version: v1beta1 + - kind: Ingresses + name: "" + version: v1beta1 + - kind: PersistentVolumeClaim + name: "" + version: v1 + - kind: Pods + name: "" + version: v1 + - kind: Replicasets + name: "" + version: v1 + - kind: Role + name: "" + version: v1 + - kind: RoleBinding + name: "" + version: v1 + - kind: Route + name: "" + version: v1 + - kind: Secrets + name: "" + version: v1 + - kind: ServiceAccounts + name: "" + version: v1 + - kind: Services + name: "" + version: v1 + specDescriptors: + - description: Action (Optional) is action object for triggering actions + displayName: Action + path: action + - description: Arch (Optional) is the architecture of the nodes where console + should be deployed + displayName: Arch + path: arch + - description: console settings AuthScheme is auth scheme for console access + displayName: Auth Scheme + path: authScheme + - description: ClusterData is object cluster data information + displayName: Cluster Data + path: clusterdata + - description: Components is database name used for components + displayName: Components + path: components + - description: ConfigOverride (Optional) is the object to provide overrides + displayName: Config Override + path: configoverride + - description: Console is the overrides to console configuration + displayName: Console + path: configoverride.console + - description: Deployer is the overrides to deployer configuration + displayName: Deployer + path: configoverride.deployer + - description: MaxNameLength (Optional) is the maximum length of the name that + the console can have + displayName: Max Name Length + path: configoverride.maxnamelength + - description: ConfigtxlatorURL is url for configtxlator server + displayName: Configtxlator URL + path: configtxlator + - description: ConnectionString is connection url for backend database + displayName: Connection String + path: connectionString + - description:
Deployer is object for deployer configs + displayName: Deployer + path: deployer + - description: DeployerTimeout is timeout value for deployer calls + displayName: Deployer Timeout + path: deployerTimeout + - description: DeployerURL is url for deployer server + displayName: Deployer URL + path: deployerUrl + - description: Email is the email used for initial access + displayName: Email + path: email + - description: FeatureFlags is object for feature flag settings + displayName: Feature Flags + path: featureflags + - description: ImagePullSecrets (Optional) is the list of ImagePullSecrets to + be used for console's deployment + displayName: Image Pull Secrets + path: imagePullSecrets + - description: Images (Optional) lists the images to be used for console's deployment + displayName: Images + path: images + - description: ConfigtxlatorImage is the name of the configtxlator image + displayName: Configtxlator Image + path: images.configtxlatorImage + - description: ConfigtxlatorTag is the tag of the configtxlator image + displayName: Configtxlator Tag + path: images.configtxlatorTag + - description: ConsoleImage is the name of the console image + displayName: Console Image + path: images.consoleImage + - description: ConsoleInitImage is the name of the console init image + displayName: Console Init Image + path: images.consoleInitImage + - description: ConsoleInitTag is the tag of the console init image + displayName: Console Init Tag + path: images.consoleInitTag + - description: ConsoleTag is the tag of the console image + displayName: Console Tag + path: images.consoleTag + - description: CouchDBImage is the name of the couchdb image + displayName: Couch DBImage + path: images.couchdbImage + - description: CouchDBTag is the tag of the couchdb image + displayName: Couch DBTag + path: images.couchdbTag + - description: DeployerImage is the name of the deployer image + displayName: Deployer Image + path: images.deployerImage + - description: DeployerTag is the tag of the deployer image + displayName: Deployer Tag + path: images.deployerTag + - description: Ingress (Optional) is ingress object for ingress overrides + displayName: Ingress + path: ingress + - description: Class (Optional) is the class to set for ingress + displayName: Class + path: ingress.class + - description: TlsSecretName (Optional) is the secret name to be used for tls + certificates + displayName: Tls Secret Name + path: ingress.tlsSecretName + - description: License should be accepted by the user to be able to setup console + displayName: License + path: license + - description: Accept should be set to true to accept the license. 
+ displayName: Accept + path: license.accept + value: + - false + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:checkbox + - description: NetworkInfo is object for network overrides + displayName: Network Info + path: networkinfo + - description: ConfigtxlatorPort is the port to access configtxlator + displayName: Configtxlator Port + path: networkinfo.configtxlatorPort + - description: ConsolePort is the port to access the console + displayName: Console Port + path: networkinfo.consolePort + - description: Domain for the components + displayName: Domain + path: networkinfo.domain + - description: ProxyPort is the port to access console proxy + displayName: Proxy Port + path: networkinfo.proxyPort + - description: Password is initial password to access console + displayName: Password + path: password + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:password + - description: PasswordSecretName is secretname where password is stored + displayName: Password Secret Name + path: passwordSecretName + - description: Region (Optional) is the region of the nodes where the console + should be deployed + displayName: Region + path: region + - description: RegistryURL is registry url used to pull images + displayName: Registry URL + path: registryURL + - description: Replicas (Optional - default 1) is the number of console replicas + to be setup + displayName: Replicas + path: replicas + - description: Resources (Optional) is the amount of resources to be provided + to console deployment + displayName: Resources + path: resources + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: Configtxlator is the resources provided to the configtxlator + container + displayName: Configtxlator + path: resources.configtxlator + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: Console is the resources provided to the console container + displayName: Console + path: resources.console + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: CouchDB is the resources provided to the couchdb container + displayName: Couch DB + path: resources.couchdb + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: Deployer is the resources provided to the deployer container + displayName: Deployer + path: resources.deployer + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: Init is the resources provided to the init container + displayName: Init + path: resources.init + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: Service (Optional) is the override object for console's service + displayName: Service + path: service + - description: The "type" of the service to be used + displayName: Type + path: service.type + - description: ServiceAccountName defines serviceaccount used for console deployment + displayName: Service Account Name + path: serviceAccountName + - description: Sessions is sessions database name to use + displayName: Sessions + path: sessions + - description: Storage (Optional - uses default storageclass if not provided) + is the override object for CA's PVC config + displayName: Storage + path: storage + - description: Console is the configuration of the storage of the console + displayName: Console + path: storage.console + - description: Class is the storage class + displayName: Class + path: storage.console.class + - description: Size of storage + 
displayName: Size + path: storage.console.size + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podCount + - description: System is system database name to use + displayName: System + path: system + - description: SystemChannel is default systemchannel name + displayName: System Channel + path: systemChannel + - description: TLSSecretName is secret name to load custom tls certs + displayName: TLSSecret Name + path: tlsSecretName + - description: Version (Optional) is version for the console + displayName: Version + path: version + - description: CAImage is the name of the CA image + displayName: CAImage + path: versions.ca.image.caImage + - description: CAInitImage is the name of the Init image + displayName: CAInit Image + path: versions.ca.image.caInitImage + - description: CAInitTag is the tag of the Init image + displayName: CAInit Tag + path: versions.ca.image.caInitTag + - description: CATag is the tag of the CA image + displayName: CATag + path: versions.ca.image.caTag + - description: EnrollerImage is the name of the init image for crypto generation + displayName: Enroller Image + path: versions.ca.image.enrollerImage + - description: EnrollerTag is the tag of the init image for crypto generation + displayName: Enroller Tag + path: versions.ca.image.enrollerTag + - description: HSMImage is the name of the HSM image + displayName: HSMImage + path: versions.ca.image.hsmImage + - description: HSMTag is the tag of the HSM image + displayName: HSMTag + path: versions.ca.image.hsmTag + - description: EnrollerImage is the name of the init image for crypto generation + displayName: Enroller Image + path: versions.orderer.image.enrollerImage + - description: EnrollerTag is the tag of the init image for crypto generation + displayName: Enroller Tag + path: versions.orderer.image.enrollerTag + - description: GRPCWebImage is the name of the grpc web proxy image + displayName: GRPCWeb Image + path: versions.orderer.image.grpcwebImage + - description: GRPCWebTag is the tag of the grpc web proxy image + displayName: GRPCWeb Tag + path: versions.orderer.image.grpcwebTag + - description: HSMImage is the name of the hsm image + displayName: HSMImage + path: versions.orderer.image.hsmImage + - description: HSMTag is the tag of the hsm image + displayName: HSMTag + path: versions.orderer.image.hsmTag + - description: OrdererImage is the name of the orderer image + displayName: Orderer Image + path: versions.orderer.image.ordererImage + - description: OrdererInitImage is the name of the orderer init image + displayName: Orderer Init Image + path: versions.orderer.image.ordererInitImage + - description: OrdererInitTag is the tag of the orderer init image + displayName: Orderer Init Tag + path: versions.orderer.image.ordererInitTag + - description: OrdererTag is the tag of the orderer image + displayName: Orderer Tag + path: versions.orderer.image.ordererTag + - description: BuilderImage is the name of the builder image + displayName: Builder Image + path: versions.peer.image.builderImage + - description: BuilderTag is the tag of the builder image + displayName: Builder Tag + path: versions.peer.image.builderTag + - description: CCLauncherImage is the name of the chaincode launcher image + displayName: CCLauncher Image + path: versions.peer.image.chaincodeLauncherImage + - description: CCLauncherTag is the tag of the chaincode launcher image + displayName: CCLauncher Tag + path: versions.peer.image.chaincodeLauncherTag + - description: CouchDBImage is the name of the couchdb image + displayName: 
Couch DBImage + path: versions.peer.image.couchdbImage + - description: CouchDBTag is the tag of the couchdb image + displayName: Couch DBTag + path: versions.peer.image.couchdbTag + - description: DindImage is the name of the dind image + displayName: Dind Image + path: versions.peer.image.dindImage + - description: DindTag is the tag of the dind image + displayName: Dind Tag + path: versions.peer.image.dindTag + - description: EnrollerImage is the name of the init image for crypto generation + displayName: Enroller Image + path: versions.peer.image.enrollerImage + - description: EnrollerTag is the tag of the init image for crypto generation + displayName: Enroller Tag + path: versions.peer.image.enrollerTag + - description: FileTransferImage is the name of the file transfer image + displayName: File Transfer Image + path: versions.peer.image.fileTransferImage + - description: FileTransferTag is the tag of the file transfer image + displayName: File Transfer Tag + path: versions.peer.image.fileTransferTag + - description: FluentdImage is the name of the fluentd logger image + displayName: Fluentd Image + path: versions.peer.image.fluentdImage + - description: FluentdTag is the tag of the fluentd logger image + displayName: Fluentd Tag + path: versions.peer.image.fluentdTag + - description: GoEnvImage is the name of the goenv image + displayName: Go Env Image + path: versions.peer.image.goEnvImage + - description: GoEnvTag is the tag of the goenv image + displayName: Go Env Tag + path: versions.peer.image.goEnvTag + - description: GRPCWebImage is the name of the grpc web proxy image + displayName: GRPCWeb Image + path: versions.peer.image.grpcwebImage + - description: GRPCWebTag is the tag of the grpc web proxy image + displayName: GRPCWeb Tag + path: versions.peer.image.grpcwebTag + - description: HSMImage is the name of the hsm image + displayName: HSMImage + path: versions.peer.image.hsmImage + - description: HSMTag is the tag of the hsm image + displayName: HSMTag + path: versions.peer.image.hsmTag + - description: JavaEnvImage is the name of the javaenv image + displayName: Java Env Image + path: versions.peer.image.javaEnvImage + - description: JavaEnvTag is the tag of the javaenv image + displayName: Java Env Tag + path: versions.peer.image.javaEnvTag + - description: NodeEnvImage is the name of the nodeenv image + displayName: Node Env Image + path: versions.peer.image.nodeEnvImage + - description: NodeEnvTag is the tag of the nodeenv image + displayName: Node Env Tag + path: versions.peer.image.nodeEnvTag + - description: PeerImage is the name of the peer image + displayName: Peer Image + path: versions.peer.image.peerImage + - description: PeerInitImage is the name of the peer init image + displayName: Peer Init Image + path: versions.peer.image.peerInitImage + - description: PeerInitTag is the tag of the peer init image + displayName: Peer Init Tag + path: versions.peer.image.peerInitTag + - description: PeerTag is the tag of the peer image + displayName: Peer Tag + path: versions.peer.image.peerTag + - description: Zone (Optional) is the zone of the nodes where the console should + be deployed + displayName: Zone + path: zone + statusDescriptors: + - description: ErrorCode is the code of classification of errors + displayName: Error Code + path: errorcode + - description: LastHeartbeatTime is when the controller reconciled this component + displayName: Last Heartbeat Time + path: lastHeartbeatTime + - description: Message provides a message for the status to be shown to 
customer + displayName: Message + path: message + - description: Reason provides a reason for an error + displayName: Reason + path: reason + x-descriptors: + - urn:alm:descriptor:io.kubernetes.phase:reason + - description: Status is defined based on the current status of the component + displayName: Status + path: status + - description: Type is true or false based on if status is valid + displayName: Type + path: type + - description: Version is the product version of the component + displayName: Version + path: version + - description: Versions is the operand version of the component + displayName: Versions + path: versions + version: v1beta1 + - description: Ordering nodes create the blocks that form the ledger and send + them to peers. + displayName: Hyperledger Fabric Orderer + kind: IBPOrderer + name: ibporderers.ibp.com + resources: + - kind: ConfigMaps + name: "" + version: v1 + - kind: Deployments + name: "" + version: v1 + - kind: IBPCA + name: "" + version: v1beta1 + - kind: IBPConsole + name: "" + version: v1beta1 + - kind: IBPOrderer + name: "" + version: v1beta1 + - kind: IBPPeer + name: "" + version: v1beta1 + - kind: Ingresses + name: "" + version: v1beta1 + - kind: PersistentVolumeClaim + name: "" + version: v1 + - kind: Pods + name: "" + version: v1 + - kind: Replicasets + name: "" + version: v1 + - kind: Role + name: "" + version: v1 + - kind: RoleBinding + name: "" + version: v1 + - kind: Route + name: "" + version: v1 + - kind: Secrets + name: "" + version: v1 + - kind: ServiceAccounts + name: "" + version: v1 + - kind: Services + name: "" + version: v1 + specDescriptors: + - description: Action (Optional) is object for orderer actions + displayName: Action + path: action + - description: Enroll contains actions for triggering crypto enroll + displayName: Enroll + path: action.enroll + - description: Ecert is used to trigger enroll for ecert + displayName: Ecert + path: action.enroll.ecert + - description: Reenroll contains actions for triggering crypto reenroll + displayName: Reenroll + path: action.reenroll + - description: Ecert is used to trigger reenroll for ecert + displayName: Ecert + path: action.reenroll.ecert + - description: EcertNewKey is used to trigger reenroll for ecert and also generating + a new private key + displayName: Ecert New Key + path: action.reenroll.ecertNewKey + - description: TLSCert is used to trigger reenroll for tlscert + displayName: TLSCert + path: action.reenroll.tlscert + - description: TLSCertNewKey is used to trigger reenroll for tlscert and also + generating a new private key + displayName: TLSCert New Key + path: action.reenroll.tlscertNewKey + - description: Restart action is used to restart orderer deployment + displayName: Restart + path: action.restart + - description: Arch (Optional) is the architecture of the nodes where orderer + should be deployed + displayName: Arch + path: arch + - description: ClusterSize (Optional) number of orderers if a cluster + displayName: Cluster Size + path: clusterSize + - description: ClusterConfigOverride (Optional) is array of config overrides + for cluster + displayName: Cluster Config Override + path: clusterconfigoverride + - description: ClusterSecret (Optional) is array of msp crypto for cluster + displayName: Cluster Secret + path: clustersecret + - description: ConfigOverride (Optional) is the object to provide overrides + to core yaml config + displayName: Config Override + path: configoverride + - description: CustomNames (Optional) is to use pre-configured resources for + 
orderer's deployment + displayName: Custom Names + path: customNames + - description: PVC is the list of PVC Names to be used for orderer's deployment + displayName: PVC + path: customNames.pvc + - description: Orderer is the pvc to be used as orderer's storage + displayName: Orderer + path: customNames.pvc.orderer + - description: DisableNodeOU (Optional) is used to switch nodeou on and off + displayName: Disable Node OU + path: disablenodeou + - description: Domain is the sub-domain used for orderer's deployment + displayName: Domain + path: domain + - description: ExternalAddress (Optional) is used internally + displayName: External Address + path: externalAddress + - description: GenesisBlock (Optional) is genesis block to start the orderer + displayName: Genesis Block + path: genesisBlock + - description: HSM (Optional) is the parameters for the HSM if being used + displayName: HSM + path: hsm + - description: PKCS11Endpoint is the endpoint for the pkcs11 proxy + displayName: PKCS11 Endpoint + path: hsm.pkcs11endpoint + - description: ImagePullSecrets (Optional) is the list of ImagePullSecrets to + be used for orderer's deployment + displayName: Image Pull Secrets + path: imagePullSecrets + - description: Images (Optional) lists the images to be used for orderer's deployment + displayName: Images + path: images + - description: EnrollerImage is the name of the init image for crypto generation + displayName: Enroller Image + path: images.enrollerImage + - description: EnrollerTag is the tag of the init image for crypto generation + displayName: Enroller Tag + path: images.enrollerTag + - description: GRPCWebImage is the name of the grpc web proxy image + displayName: GRPCWeb Image + path: images.grpcwebImage + - description: GRPCWebTag is the tag of the grpc web proxy image + displayName: GRPCWeb Tag + path: images.grpcwebTag + - description: HSMImage is the name of the hsm image + displayName: HSMImage + path: images.hsmImage + - description: HSMTag is the tag of the hsm image + displayName: HSMTag + path: images.hsmTag + - description: OrdererImage is the name of the orderer image + displayName: Orderer Image + path: images.ordererImage + - description: OrdererInitImage is the name of the orderer init image + displayName: Orderer Init Image + path: images.ordererInitImage + - description: OrdererInitTag is the tag of the orderer init image + displayName: Orderer Init Tag + path: images.ordererInitTag + - description: OrdererTag is the tag of the orderer image + displayName: Orderer Tag + path: images.ordererTag + - description: Ingress (Optional) is ingress object for ingress overrides + displayName: Ingress + path: ingress + - description: Class (Optional) is the class to set for ingress + displayName: Class + path: ingress.class + - description: TlsSecretName (Optional) is the secret name to be used for tls + certificates + displayName: Tls Secret Name + path: ingress.tlsSecretName + - description: IsPrecreate (Optional) defines if orderer is in precreate state + displayName: Is Precreate + path: isprecreate + - description: License should be accepted by the user to be able to setup orderer + displayName: License + path: license + - description: Accept should be set to true to accept the license.
+ displayName: Accept + path: license.accept + value: + - false + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:checkbox + - description: ClusterLocation (Optional) is array of cluster location settings + for cluster + displayName: Cluster Location + path: location + - description: Region (Optional) is the region of the nodes where the orderer + should be deployed + displayName: Region + path: location[0].region + - description: Zone (Optional) is the zone of the nodes where the orderer should + be deployed + displayName: Zone + path: location[0].zone + - description: MSPID is the msp id of the orderer + displayName: MSPID + path: mspID + - description: NumSecondsWarningPeriod (Optional - default 30 days) is used + to define certificate expiry warning period. + displayName: Num Seconds Warning Period + path: numSecondsWarningPeriod + - description: NodeNumber (Optional) is the number of this node in cluster - + used internally + displayName: Node Number + path: number + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: OrdererType is type of orderer you want to start + displayName: Orderer Type + path: ordererType + - description: OrgName is the organization name of the orderer + displayName: Org Name + path: orgName + - description: Region (Optional) is the region of the nodes where the orderer + should be deployed + displayName: Region + path: region + - description: RegistryURL is registry url used to pull images + displayName: Registry URL + path: registryURL + - description: Replicas (Optional - default 1) is the number of orderer replicas + to be setup + displayName: Replicas + path: replicas + - description: Resources (Optional) is the amount of resources to be provided + to orderer deployment + displayName: Resources + path: resources + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: Enroller (Optional) is the resources provided to the enroller + container + displayName: Enroller + path: resources.enroller + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: HSMDaemon (Optional) is the resources provided to the HSM Daemon + container + displayName: HSMDaemon + path: resources.hsmdaemon + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: Init (Optional) is the resources provided to the init container + displayName: Init + path: resources.init + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: Orderer (Optional) is the resources provided to the orderer container + displayName: Orderer + path: resources.orderer + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: GRPCProxy (Optional) is the resources provided to the proxy container + displayName: GRPCProxy + path: resources.proxy + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: Secret is object for msp crypto + displayName: Secret + path: secret + - description: Enrollment defines enrollment part of secret spec + displayName: Enrollment + path: secret.enrollment + - description: ClientAuth contains client auth enrollment details + displayName: Client Auth + path: secret.enrollment.clientauth + - description: AdminCerts is the base64 encoded admincerts + displayName: Admin Certs + path: secret.enrollment.clientauth.admincerts + - description: CAHost is host part of the CA to use + displayName: CAHost + path: secret.enrollment.clientauth.cahost + -
description: CAName is name of CA + displayName: CAName + path: secret.enrollment.clientauth.caname + - description: CAPort is port of the CA to use + displayName: CAPort + path: secret.enrollment.clientauth.caport + - description: CATLS is tls details to talk to CA endpoint + displayName: CATLS + path: secret.enrollment.clientauth.catls + - description: CACert is the base64 encoded certificate + displayName: CACert + path: secret.enrollment.clientauth.catls.cacert + - description: CSR is the CSR override object + displayName: CSR + path: secret.enrollment.clientauth.csr + - description: Hosts override for CSR + displayName: Hosts + path: secret.enrollment.clientauth.csr.hosts + - description: EnrollID is the enrollment username + displayName: Enroll ID + path: secret.enrollment.clientauth.enrollid + - description: EnrollSecret is enrollment secret ( password ) + displayName: Enroll Secret + path: secret.enrollment.clientauth.enrollsecret + - description: Component contains ecert enrollment details + displayName: Component + path: secret.enrollment.component + - description: AdminCerts is the base64 encoded admincerts + displayName: Admin Certs + path: secret.enrollment.component.admincerts + - description: CAHost is host part of the CA to use + displayName: CAHost + path: secret.enrollment.component.cahost + - description: CAName is name of CA + displayName: CAName + path: secret.enrollment.component.caname + - description: CAPort is port of the CA to use + displayName: CAPort + path: secret.enrollment.component.caport + - description: CATLS is tls details to talk to CA endpoint + displayName: CATLS + path: secret.enrollment.component.catls + - description: CACert is the base64 encoded certificate + displayName: CACert + path: secret.enrollment.component.catls.cacert + - description: CSR is the CSR override object + displayName: CSR + path: secret.enrollment.component.csr + - description: Hosts override for CSR + displayName: Hosts + path: secret.enrollment.component.csr.hosts + - description: EnrollID is the enrollment username + displayName: Enroll ID + path: secret.enrollment.component.enrollid + - description: EnrollSecret is enrollment secret ( password ) + displayName: Enroll Secret + path: secret.enrollment.component.enrollsecret + - description: TLS contains tls enrollment details + displayName: TLS + path: secret.enrollment.tls + - description: AdminCerts is the base64 encoded admincerts + displayName: Admin Certs + path: secret.enrollment.tls.admincerts + - description: CAHost is host part of the CA to use + displayName: CAHost + path: secret.enrollment.tls.cahost + - description: CAName is name of CA + displayName: CAName + path: secret.enrollment.tls.caname + - description: CAPort is port of the CA to use + displayName: CAPort + path: secret.enrollment.tls.caport + - description: CATLS is tls details to talk to CA endpoint + displayName: CATLS + path: secret.enrollment.tls.catls + - description: CACert is the base64 encoded certificate + displayName: CACert + path: secret.enrollment.tls.catls.cacert + - description: CSR is the CSR override object + displayName: CSR + path: secret.enrollment.tls.csr + - description: Hosts override for CSR + displayName: Hosts + path: secret.enrollment.tls.csr.hosts + - description: EnrollID is the enrollment username + displayName: Enroll ID + path: secret.enrollment.tls.enrollid + - description: EnrollSecret is enrollment secret ( password ) + displayName: Enroll Secret + path: secret.enrollment.tls.enrollsecret + - description: MSP defines 
msp part of secret spec + displayName: MSP + path: secret.msp + - description: ClientAuth contains crypto for client auth certs + displayName: Client Auth + path: secret.msp.clientauth + - description: AdminCerts is base64 encoded admincerts array + displayName: Admin Certs + path: secret.msp.clientauth.admincerts + - description: CACerts is base64 encoded cacerts array + displayName: CACerts + path: secret.msp.clientauth.cacerts + - description: IntermediateCerts is base64 encoded intermediate certs array + displayName: Intermediate Certs + path: secret.msp.clientauth.intermediatecerts + - description: KeyStore is base64 encoded private key + displayName: Key Store + path: secret.msp.clientauth.keystore + - description: SignCerts is base64 encoded sign cert + displayName: Sign Certs + path: secret.msp.clientauth.signcerts + - description: Component contains crypto for ecerts + displayName: Component + path: secret.msp.component + - description: AdminCerts is base64 encoded admincerts array + displayName: Admin Certs + path: secret.msp.component.admincerts + - description: CACerts is base64 encoded cacerts array + displayName: CACerts + path: secret.msp.component.cacerts + - description: IntermediateCerts is base64 encoded intermediate certs array + displayName: Intermediate Certs + path: secret.msp.component.intermediatecerts + - description: KeyStore is base64 encoded private key + displayName: Key Store + path: secret.msp.component.keystore + - description: SignCerts is base64 encoded sign cert + displayName: Sign Certs + path: secret.msp.component.signcerts + - description: TLS contains crypto for tls certs + displayName: TLS + path: secret.msp.tls + - description: AdminCerts is base64 encoded admincerts array + displayName: Admin Certs + path: secret.msp.tls.admincerts + - description: CACerts is base64 encoded cacerts array + displayName: CACerts + path: secret.msp.tls.cacerts + - description: IntermediateCerts is base64 encoded intermediate certs array + displayName: Intermediate Certs + path: secret.msp.tls.intermediatecerts + - description: KeyStore is base64 encoded private key + displayName: Key Store + path: secret.msp.tls.keystore + - description: SignCerts is base64 encoded sign cert + displayName: Sign Certs + path: secret.msp.tls.signcerts + - description: Service (Optional) is the override object for orderer's service + displayName: Service + path: service + - description: The "type" of the service to be used + displayName: Type + path: service.type + - description: Storage (Optional - uses default storageclass if not provided) + is the override object for CA's PVC config + displayName: Storage + path: storage + - description: Orderer (Optional) is the configuration of the storage of the + orderer + displayName: Orderer + path: storage.orderer + - description: Class is the storage class + displayName: Class + path: storage.orderer.class + - description: Size of storage + displayName: Size + path: storage.orderer.size + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podCount + - description: SystemChannelName is the name of systemchannel + displayName: System Channel Name + path: systemChannelName + - description: FabricVersion (Optional) is fabric version for the orderer + displayName: Fabric Version + path: version + - description: Zone (Optional) is the zone of the nodes where the orderer should + be deployed + displayName: Zone + path: zone + statusDescriptors: + - description: ErrorCode is the code of classification of errors + displayName: Error Code + path: 
errorcode + - description: LastHeartbeatTime is when the controller reconciled this component + displayName: Last Heartbeat Time + path: lastHeartbeatTime + - description: Message provides a message for the status to be shown to customer + displayName: Message + path: message + - description: Reason provides a reason for an error + displayName: Reason + path: reason + x-descriptors: + - urn:alm:descriptor:io.kubernetes.phase:reason + - description: Status is defined based on the current status of the component + displayName: Status + path: status + - description: Type is true or false based on if status is valid + displayName: Type + path: type + - description: Version is the product version of the component + displayName: Version + path: version + - description: Versions is the operand version of the component + displayName: Versions + path: versions + version: v1beta1 + - description: Blockchain Peer is the Schema for the ibppeers API. + displayName: Hyperledger Fabric Peer + kind: IBPPeer + name: ibppeers.ibp.com + resources: + - kind: ConfigMaps + name: "" + version: v1 + - kind: Deployments + name: "" + version: v1 + - kind: IBPCA + name: "" + version: v1beta1 + - kind: IBPConsole + name: "" + version: v1beta1 + - kind: IBPOrderer + name: "" + version: v1beta1 + - kind: IBPPeer + name: "" + version: v1beta1 + - kind: Ingresses + name: "" + version: v1beta1 + - kind: PersistentVolumeClaim + name: "" + version: v1 + - kind: Pods + name: "" + version: v1 + - kind: Replicasets + name: "" + version: v1 + - kind: Role + name: "" + version: v1 + - kind: RoleBinding + name: "" + version: v1 + - kind: Route + name: "" + version: v1 + - kind: Secrets + name: "" + version: v1 + - kind: ServiceAccounts + name: "" + version: v1 + - kind: Services + name: "" + version: v1 + - kind: clusterversions + name: "" + version: v1 + specDescriptors: + - description: Action (Optional) is object for peer actions + displayName: Action + path: action + - description: Enroll contains actions for triggering crypto enroll + displayName: Enroll + path: action.enroll + - description: Ecert is used to trigger enroll for ecert + displayName: Ecert + path: action.enroll.ecert + - description: TLSCert is used to trigger enroll for tlscert + displayName: TLSCert + path: action.enroll.tlscert + - description: Reenroll contains actions for triggering crypto reenroll + displayName: Reenroll + path: action.reenroll + - description: Ecert is used to trigger reenroll for ecert + displayName: Ecert + path: action.reenroll.ecert + - description: EcertNewKey is used to trigger reenroll for ecert and also generating + a new private key + displayName: Ecert New Key + path: action.reenroll.ecertNewKey + - description: TLSCert is used to trigger reenroll for tlscert + displayName: TLSCert + path: action.reenroll.tlscert + - description: TLSCertNewKey is used to trigger reenroll for tlscert and also + generating a new private key + displayName: TLSCert New Key + path: action.reenroll.tlscertNewKey + - description: Restart action is used to restart peer deployment + displayName: Restart + path: action.restart + - description: UpgradeDBs action is used to trigger peer node upgrade-dbs command + displayName: Upgrade DBs + path: action.upgradedbs + - description: cluster related configs Arch (Optional) is the architecture of + the nodes where peer should be deployed + displayName: Arch + path: arch + - description: ConfigOverride (Optional) is the object to provide overrides + to core yaml config + displayName: Config Override + path: 
configoverride + - description: CustomNames (Optional) is to use pre-configured resources for + peer's deployment + displayName: Custom Names + path: customNames + - description: PVC is the list of PVC Names to be used for peer's deployment + displayName: PVC + path: customNames.pvc + - description: Peer is the pvc to be used as peer's storage + displayName: Peer + path: customNames.pvc.peer + - description: StateDB is the pvc to be used as statedb's storage + displayName: State DB + path: customNames.pvc.statedb + - description: advanced configs DindArgs (Optional) is used to override args + passed to dind container + displayName: Dind Args + path: dindArgs + - description: DisableNodeOU (Optional) is used to switch nodeou on and off + displayName: Disable Node OU + path: disablenodeou + - description: proxy ip passed if not OCP, domain for OCP Domain is the sub-domain + used for peer's deployment + displayName: Domain + path: domain + - description: HSM (Optional) is the parameters for the HSM if being used + displayName: HSM + path: hsm + - description: PKCS11Endpoint is the endpoint for the pkcs11 proxy + displayName: PKCS11 Endpoint + path: hsm.pkcs11endpoint + - description: ImagePullSecrets (Optional) is the list of ImagePullSecrets to + be used for peer's deployment + displayName: Image Pull Secrets + path: imagePullSecrets + - description: Images (Optional) lists the images to be used for peer's deployment + displayName: Images + path: images + - description: BuilderImage is the name of the builder image + displayName: Builder Image + path: images.builderImage + - description: BuilderTag is the tag of the builder image + displayName: Builder Tag + path: images.builderTag + - description: CCLauncherImage is the name of the chaincode launcher image + displayName: CCLauncher Image + path: images.chaincodeLauncherImage + - description: CCLauncherTag is the tag of the chaincode launcher image + displayName: CCLauncher Tag + path: images.chaincodeLauncherTag + - description: CouchDBImage is the name of the couchdb image + displayName: Couch DBImage + path: images.couchdbImage + - description: CouchDBTag is the tag of the couchdb image + displayName: Couch DBTag + path: images.couchdbTag + - description: DindImage is the name of the dind image + displayName: Dind Image + path: images.dindImage + - description: DindTag is the tag of the dind image + displayName: Dind Tag + path: images.dindTag + - description: EnrollerImage is the name of the init image for crypto generation + displayName: Enroller Image + path: images.enrollerImage + - description: EnrollerTag is the tag of the init image for crypto generation + displayName: Enroller Tag + path: images.enrollerTag + - description: FileTransferImage is the name of the file transfer image + displayName: File Transfer Image + path: images.fileTransferImage + - description: FileTransferTag is the tag of the file transfer image + displayName: File Transfer Tag + path: images.fileTransferTag + - description: FluentdImage is the name of the fluentd logger image + displayName: Fluentd Image + path: images.fluentdImage + - description: FluentdTag is the tag of the fluentd logger image + displayName: Fluentd Tag + path: images.fluentdTag + - description: GoEnvImage is the name of the goenv image + displayName: Go Env Image + path: images.goEnvImage + - description: GoEnvTag is the tag of the goenv image + displayName: Go Env Tag + path: images.goEnvTag + - description: GRPCWebImage is the name of the grpc web proxy image + displayName: GRPCWeb
Image + path: images.grpcwebImage + - description: GRPCWebTag is the tag of the grpc web proxy image + displayName: GRPCWeb Tag + path: images.grpcwebTag + - description: HSMImage is the name of the hsm image + displayName: HSMImage + path: images.hsmImage + - description: HSMTag is the tag of the hsm image + displayName: HSMTag + path: images.hsmTag + - description: JavaEnvImage is the name of the javaenv image + displayName: Java Env Image + path: images.javaEnvImage + - description: JavaEnvTag is the tag of the javaenv image + displayName: Java Env Tag + path: images.javaEnvTag + - description: NodeEnvImage is the name of the nodeenv image + displayName: Node Env Image + path: images.nodeEnvImage + - description: NodeEnvTag is the tag of the nodeenv image + displayName: Node Env Tag + path: images.nodeEnvTag + - description: PeerImage is the name of the peer image + displayName: Peer Image + path: images.peerImage + - description: PeerInitImage is the name of the peer init image + displayName: Peer Init Image + path: images.peerInitImage + - description: PeerInitTag is the tag of the peer init image + displayName: Peer Init Tag + path: images.peerInitTag + - description: PeerTag is the tag of the peer image + displayName: Peer Tag + path: images.peerTag + - description: Ingress (Optional) is ingress object for ingress overrides + displayName: Ingress + path: ingress + - description: Class (Optional) is the class to set for ingress + displayName: Class + path: ingress.class + - description: TlsSecretName (Optional) is the secret name to be used for tls + certificates + displayName: Tls Secret Name + path: ingress.tlsSecretName + - description: License should be accepted by the user to be able to setup Peer + displayName: License + path: license + - description: Accept should be set to true to accept the license. + displayName: Accept + path: license.accept + value: + - false + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:checkbox + - description: peer specific configs MSPID is the msp id of the peer + displayName: MSPID + path: mspID + - description: msp data can be passed in secret on in spec MSPSecret (Optional) + is secret used to store msp crypto + displayName: MSPSecret + path: mspSecret + - description: NumSecondsWarningPeriod (Optional - default 30 days) is used + to define certificate expiry warning period. 
+ displayName: Num Seconds Warning Period + path: numSecondsWarningPeriod + - description: PeerExternalEndpoint (Optional) is used to override peer external + endpoint + displayName: Peer External Endpoint + path: peerExternalEndpoint + - description: Region (Optional) is the region of the nodes where the peer should + be deployed + displayName: Region + path: region + - description: RegistryURL is registry url used to pull images + displayName: Registry URL + path: registryURL + - description: Replicas (Optional - default 1) is the number of peer replicas + to be setup + displayName: Replicas + path: replicas + - description: Resources (Optional) is the amount of resources to be provided + to peer deployment + displayName: Resources + path: resources + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: CCLauncher (Optional) is the resources provided to the cclauncher + container + displayName: CCLauncher + path: resources.chaincodelauncher + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: CouchDB (Optional) is the resources provided to the couchdb container + displayName: Couch DB + path: resources.couchdb + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: DinD (Optional) is the resources provided to the dind container + displayName: Din D + path: resources.dind + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: Enroller (Optional) is the resources provided to the enroller + container + displayName: Enroller + path: resources.enroller + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: FluentD (Optional) is the resources provided to the fluentd container + displayName: Fluent D + path: resources.fluentd + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: HSMDaemon (Optional) is the resources provided to the HSM Daemon + container + displayName: HSMDaemon + path: resources.hsmdaemon + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: Init (Optional) is the resources provided to the init container + displayName: Init + path: resources.init + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: Peer (Optional) is the resources provided to the peer container + displayName: Peer + path: resources.peer + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: GRPCProxy (Optional) is the resources provided to the proxy container + displayName: GRPCProxy + path: resources.proxy + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: Secret is object for msp crypto + displayName: Secret + path: secret + - description: Enrollment defines enrollment part of secret spec + displayName: Enrollment + path: secret.enrollment + - description: ClientAuth contains client uath enrollment details + displayName: Client Auth + path: secret.enrollment.clientauth + - description: AdminCerts is the base64 encoded admincerts + displayName: Admin Certs + path: secret.enrollment.clientauth.admincerts + - description: CAHost is host part of the CA to use + displayName: CAHost + path: secret.enrollment.clientauth.cahost + - description: CAName is name of CA + displayName: CAName + path: secret.enrollment.clientauth.caname + - description: CAPort is port of the CA to use + displayName: CAPort + path: 
secret.enrollment.clientauth.caport + - description: CATLS is tls details to talk to CA endpoint + displayName: CATLS + path: secret.enrollment.clientauth.catls + - description: CACert is the base64 encoded certificate + displayName: CACert + path: secret.enrollment.clientauth.catls.cacert + - description: CSR is the CSR override object + displayName: CSR + path: secret.enrollment.clientauth.csr + - description: Hosts override for CSR + displayName: Hosts + path: secret.enrollment.clientauth.csr.hosts + - description: EnrollID is the enrollment username + displayName: Enroll ID + path: secret.enrollment.clientauth.enrollid + - description: EnrollSecret is enrollment secret ( password ) + displayName: Enroll Secret + path: secret.enrollment.clientauth.enrollsecret + - description: Component contains ecert enrollment details + displayName: Component + path: secret.enrollment.component + - description: AdminCerts is the base64 encoded admincerts + displayName: Admin Certs + path: secret.enrollment.component.admincerts + - description: CAHost is host part of the CA to use + displayName: CAHost + path: secret.enrollment.component.cahost + - description: CAName is name of CA + displayName: CAName + path: secret.enrollment.component.caname + - description: CAPort is port of the CA to use + displayName: CAPort + path: secret.enrollment.component.caport + - description: CATLS is tls details to talk to CA endpoint + displayName: CATLS + path: secret.enrollment.component.catls + - description: CACert is the base64 encoded certificate + displayName: CACert + path: secret.enrollment.component.catls.cacert + - description: CSR is the CSR override object + displayName: CSR + path: secret.enrollment.component.csr + - description: Hosts override for CSR + displayName: Hosts + path: secret.enrollment.component.csr.hosts + - description: EnrollID is the enrollment username + displayName: Enroll ID + path: secret.enrollment.component.enrollid + - description: EnrollSecret is enrollment secret ( password ) + displayName: Enroll Secret + path: secret.enrollment.component.enrollsecret + - description: TLS contains tls enrollment details + displayName: TLS + path: secret.enrollment.tls + - description: AdminCerts is the base64 encoded admincerts + displayName: Admin Certs + path: secret.enrollment.tls.admincerts + - description: CAHost is host part of the CA to use + displayName: CAHost + path: secret.enrollment.tls.cahost + - description: CAName is name of CA + displayName: CAName + path: secret.enrollment.tls.caname + - description: CAPort is port of the CA to use + displayName: CAPort + path: secret.enrollment.tls.caport + - description: CATLS is tls details to talk to CA endpoint + displayName: CATLS + path: secret.enrollment.tls.catls + - description: CACert is the base64 encoded certificate + displayName: CACert + path: secret.enrollment.tls.catls.cacert + - description: CSR is the CSR override object + displayName: CSR + path: secret.enrollment.tls.csr + - description: Hosts override for CSR + displayName: Hosts + path: secret.enrollment.tls.csr.hosts + - description: EnrollID is the enrollment username + displayName: Enroll ID + path: secret.enrollment.tls.enrollid + - description: EnrollSecret is enrollment secret ( password ) + displayName: Enroll Secret + path: secret.enrollment.tls.enrollsecret + - description: MSP defines msp part of secret spec + displayName: MSP + path: secret.msp + - description: ClientAuth contains crypto for client auth certs + displayName: Client Auth + path: 
secret.msp.clientauth + - description: AdminCerts is base64 encoded admincerts array + displayName: Admin Certs + path: secret.msp.clientauth.admincerts + - description: CACerts is base64 encoded cacerts array + displayName: CACerts + path: secret.msp.clientauth.cacerts + - description: IntermediateCerts is base64 encoded intermediate certs array + displayName: Intermediate Certs + path: secret.msp.clientauth.intermediatecerts + - description: KeyStore is base64 encoded private key + displayName: Key Store + path: secret.msp.clientauth.keystore + - description: SignCerts is base64 encoded sign cert + displayName: Sign Certs + path: secret.msp.clientauth.signcerts + - description: Component contains crypto for ecerts + displayName: Component + path: secret.msp.component + - description: AdminCerts is base64 encoded admincerts array + displayName: Admin Certs + path: secret.msp.component.admincerts + - description: CACerts is base64 encoded cacerts array + displayName: CACerts + path: secret.msp.component.cacerts + - description: IntermediateCerts is base64 encoded intermediate certs array + displayName: Intermediate Certs + path: secret.msp.component.intermediatecerts + - description: KeyStore is base64 encoded private key + displayName: Key Store + path: secret.msp.component.keystore + - description: SignCerts is base64 encoded sign cert + displayName: Sign Certs + path: secret.msp.component.signcerts + - description: TLS contains crypto for tls certs + displayName: TLS + path: secret.msp.tls + - description: AdminCerts is base64 encoded admincerts array + displayName: Admin Certs + path: secret.msp.tls.admincerts + - description: CACerts is base64 encoded cacerts array + displayName: CACerts + path: secret.msp.tls.cacerts + - description: IntermediateCerts is base64 encoded intermediate certs array + displayName: Intermediate Certs + path: secret.msp.tls.intermediatecerts + - description: KeyStore is base64 encoded private key + displayName: Key Store + path: secret.msp.tls.keystore + - description: SignCerts is base64 encoded sign cert + displayName: Sign Certs + path: secret.msp.tls.signcerts + - description: Service (Optional) is the override object for peer's service + displayName: Service + path: service + - description: The "type" of the service to be used + displayName: Type + path: service.type + - description: StateDb (Optional) is the statedb used for peer, can be couchdb + or leveldb + displayName: State Db + path: stateDb + - description: Storage (Optional - uses default storageclass if not provided) + is the override object for peer's PVC config + displayName: Storage + path: storage + - description: Peer (Optional) is the configuration of the storage of the peer + displayName: Peer + path: storage.peer + - description: Class is the storage class + displayName: Class + path: storage.peer.class + - description: Size of storage + displayName: Size + path: storage.peer.size + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podCount + - description: StateDB (Optional) is the configuration of the storage of the + statedb + displayName: State DB + path: storage.statedb + - description: Class is the storage class + displayName: Class + path: storage.statedb.class + - description: Size of storage + displayName: Size + path: storage.statedb.size + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podCount + - description: FabricVersion (Optional) is fabric version for the peer + displayName: Fabric Version + path: version + - description: Zone (Optional) is the zone of the 
nodes where the peer should + be deployed + displayName: Zone + path: zone + statusDescriptors: + - description: ErrorCode is the code of classification of errors + displayName: Error Code + path: errorcode + - description: LastHeartbeatTime is when the controller reconciled this component + displayName: Last Heartbeat Time + path: lastHeartbeatTime + - description: Message provides a message for the status to be shown to customer + displayName: Message + path: message + - description: Reason provides a reason for an error + displayName: Reason + path: reason + x-descriptors: + - urn:alm:descriptor:io.kubernetes.phase:reason + - description: Status is defined based on the current status of the component + displayName: Status + path: status + - description: Type is true or false based on if status is valid + displayName: Type + path: type + - description: Version is the product version of the component + displayName: Version + path: version + - description: Versions is the operand version of the component + displayName: Versions + path: versions + version: v1beta1 + description: TODO + displayName: Fabric Opensource Operator + icon: + - base64data: PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIzMiIgaGVpZ2h0PSIzMiIgdmlld0JveD0iMCAwIDMyIDMyIj48cGF0aCBkPSJNMTYsMEExNiwxNiwwLDEsMCwzMiwxNiwxNiwxNiwwLDAsMCwxNiwwWk05LDIySDdWMTBIOVptMTMsM0gxMFYyM0gyMlpNMjIsOUgxMFY3SDIyWm0zLDEzSDIzVjEwaDJaIiBzdHlsZT0iZmlsbDojMTMxNzFhIi8+PC9zdmc+Cg== + mediatype: image/svg+xml + install: + spec: + deployments: + - label: + control-plane: controller-manager + name: operator-controller-manager + spec: + replicas: 1 + selector: + matchLabels: + control-plane: controller-manager + name: controller-manager + strategy: + type: Recreate + template: + metadata: + labels: + control-plane: controller-manager + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + - s390x + containers: + - args: + - --secure-listen-address=0.0.0.0:8443 + - --upstream=http://127.0.0.1:8080/ + - --logtostderr=true + - --v=10 + image: gcr.io/kubebuilder/kube-rbac-proxy:v0.5.0 + name: kube-rbac-proxy + ports: + - containerPort: 8443 + name: https + resources: {} + - args: + - --metrics-addr=127.0.0.1:8080 + - --enable-leader-election + command: + - /manager + image: controller:latest + name: manager + resources: + limits: + cpu: 100m + memory: 30Mi + requests: + cpu: 100m + memory: 20Mi + - command: + - ibp-operator + env: + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.annotations['olm.targetNamespaces'] + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: OPERATOR_NAME + value: operator + - name: CLUSTERTYPE + value: OPENSHIFT + image: todo:update + imagePullPolicy: Always + livenessProbe: + failureThreshold: 5 + initialDelaySeconds: 10 + tcpSocket: + port: 8383 + timeoutSeconds: 5 + name: operator + readinessProbe: + initialDelaySeconds: 10 + periodSeconds: 5 + tcpSocket: + port: 8383 + timeoutSeconds: 5 + resources: + limits: + cpu: 100m + ephemeral-storage: 1Gi + memory: 200Mi + requests: + cpu: 100m + ephemeral-storage: 100Mi + memory: 200Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - CHOWN + - FOWNER + drop: + - ALL + privileged: false + readOnlyRootFilesystem: false + runAsNonRoot: false + runAsUser: 1001 + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 1001 + serviceAccountName: 
operator-controller-manager + terminationGracePeriodSeconds: 10 + strategy: deployment + installModes: + - supported: true + type: OwnNamespace + - supported: false + type: SingleNamespace + - supported: false + type: MultiNamespace + - supported: false + type: AllNamespaces + keywords: + - hyperledger + - fabric + maturity: alpha + provider: + name: Opensource + version: 1.0.0 diff --git a/bundle/manifests/ibp.com_ibpcas.yaml b/bundle/manifests/ibp.com_ibpcas.yaml new file mode 100644 index 00000000..0796f93c --- /dev/null +++ b/bundle/manifests/ibp.com_ibpcas.yaml @@ -0,0 +1,388 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + creationTimestamp: null + name: ibpcas.ibp.com +spec: + group: ibp.com + names: + kind: IBPCA + listKind: IBPCAList + plural: ibpcas + singular: ibpca + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: 'Certificate Authorities issue certificates for all the identities + to transact on the network. Warning: CA deployment using this tile is not + supported. Please use the IBP Console to deploy a CA.' + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
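To make the IBPPeer spec and status descriptors above more concrete, a minimal IBPPeer resource shaped by those fields might look like the sketch below. Every concrete value (names, hosts, version string, storage class, enrollment credentials) is an illustrative assumption, not a default defined in this patch.

apiVersion: ibp.com/v1beta1
kind: IBPPeer
metadata:
  name: org1-peer1                       # hypothetical name
spec:
  license:
    accept: true
  mspID: Org1MSP                         # hypothetical MSP ID
  version: 2.4.3                         # illustrative FabricVersion string
  domain: apps.example.com               # hypothetical sub-domain
  stateDb: couchdb                       # couchdb or leveldb, per the descriptor above
  storage:
    peer:
      class: standard                    # hypothetical storage class
      size: 10Gi
    statedb:
      class: standard
      size: 10Gi
  resources:
    peer:
      requests:
        cpu: 200m                        # illustrative resourceRequirements values
        memory: 400Mi
  secret:
    enrollment:
      component:
        cahost: org1-ca.apps.example.com # hypothetical CA endpoint
        caport: "443"
        caname: ca
        catls:
          cacert: <base64-encoded CA TLS certificate>
        enrollid: peer1                  # hypothetical enroll ID
        enrollsecret: peer1pw            # hypothetical enroll secret
      tls:
        cahost: org1-ca.apps.example.com
        caport: "443"
        caname: tlsca
        catls:
          cacert: <base64-encoded CA TLS certificate>
        enrollid: peer1
        enrollsecret: peer1pw

Validation of such a resource is done by the ibppeers.ibp.com CustomResourceDefinition, not by these descriptors, which only control how the fields render in the OLM UI.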
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IBPCASpec defines the desired state of IBP CA + properties: + action: + description: Action (Optional) is action object for trigerring actions + properties: + renew: + description: Renew action is object for certificate renewals + properties: + tlscert: + description: TLSCert action is used to renew TLS crypto for + CA server + type: boolean + type: object + restart: + description: Restart action is used to restart the running CA + type: boolean + type: object + arch: + description: Arch (Optional) is the architecture of the nodes where + CA should be deployed + items: + type: string + type: array + configoverride: + description: ConfigOverride (Optional) is the object to provide overrides + to CA & TLSCA config + properties: + ca: + description: CA (Optional) is the overrides to CA's configuration + type: object + x-kubernetes-preserve-unknown-fields: true + maxnamelength: + description: MaxNameLength (Optional) is the maximum length of + the name that the CA can have + type: integer + tlsca: + description: TLSCA (Optional) is the overrides to TLSCA's configuration + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + customNames: + description: CustomNames (Optional) is to use pre-configured resources + for CA's deployment + properties: + pvc: + description: PVC is the list of PVC Names to be used for CA's + deployment + properties: + ca: + description: CA is the pvc to be used as CA's storage + type: string + type: object + sqlitepath: + description: Sqlite is the sqlite path to be used for CA's deployment + type: string + type: object + domain: + description: Domain is the sub-domain used for CA's deployment + type: string + hsm: + description: HSM (Optional) is DEPRECATED + properties: + pkcs11endpoint: + description: PKCS11Endpoint is DEPRECATED + type: string + type: object + imagePullSecrets: + description: ImagePullSecrets (Optional) is the list of ImagePullSecrets + to be used for CA's deployment + items: + type: string + type: array + images: + description: Images (Optional) lists the images to be used for CA's + deployment + properties: + caImage: + description: CAImage is the name of the CA image + type: string + caInitImage: + description: CAInitImage is the name of the Init image + type: string + caInitTag: + description: CAInitTag is the tag of the Init image + type: string + caTag: + description: CATag is the tag of the CA image + type: string + enrollerImage: + description: EnrollerImage is the name of the init image for crypto + generation + type: string + enrollerTag: + description: EnrollerTag is the tag of the init image for crypto + generation + type: string + hsmImage: + description: HSMImage is the name of the HSM image + type: string + hsmTag: + description: HSMTag is the tag of the HSM image + type: string + type: object + ingress: + description: Ingress (Optional) is ingress object for ingress overrides + properties: + class: + description: Class (Optional) is the class to set for ingress + type: string + tlsSecretName: + description: TlsSecretName (Optional) is the secret name to be + used for tls certificates + type: string + type: object + license: + description: License should be accepted by the user to be able to + setup CA + properties: + accept: + description: Accept should be set to true to accept the license. 
+ enum: + - true + type: boolean + type: object + numSecondsWarningPeriod: + description: NumSecondsWarningPeriod (Optional - default 30 days) + is used to define certificate expiry warning period. + format: int64 + type: integer + region: + description: Region (Optional) is the region of the nodes where the + CA should be deployed + type: string + registryURL: + description: RegistryURL is registry url used to pull images + type: string + replicas: + description: Replicas (Optional - default 1) is the number of CA replicas + to be setup + format: int32 + type: integer + resources: + description: Resources (Optional) is the amount of resources to be + provided to CA deployment + properties: + ca: + description: CA is the resources provided to the CA container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + enrollJob: + description: EnrollJJob is the resources provided to the enroll + job container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + hsmDaemon: + description: HSMDaemon is the resources provided to the HSM daemon + container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + init: + description: Init is the resources provided to the init container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + service: + description: Service (Optional) is the override object for CA's service + properties: + type: + description: The "type" of the service to be used + type: string + type: object + storage: + description: Storage (Optional - uses default storageclass if not + provided) is the override object for CA's PVC config + properties: + ca: + description: CA is the configuration of the storage of the CA + properties: + class: + description: Class is the storage class + type: string + size: + description: Size of storage + type: string + type: object + type: object + version: + description: FabricVersion (Optional) set the fabric version you want + to use. 
+ type: string + zone: + description: Zone (Optional) is the zone of the nodes where the CA + should be deployed + type: string + required: + - license + - version + type: object + status: + description: Status is the observed state of IBPCA + properties: + errorcode: + description: ErrorCode is the code of classification of errors + type: integer + lastHeartbeatTime: + description: LastHeartbeatTime is when the controller reconciled this + component + type: string + message: + description: Message provides a message for the status to be shown + to customer + type: string + reason: + description: Reason provides a reason for an error + type: string + status: + description: Status is defined based on the current status of the + component + type: string + type: + description: Type is true or false based on if status is valid + type: string + version: + description: Version is the product (IBP) version of the component + type: string + versions: + description: Versions is the operand version of the component + properties: + reconciled: + description: Reconciled provides the reconciled version of the + operand + type: string + required: + - reconciled + type: object + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/bundle/manifests/ibp.com_ibpconsoles.yaml b/bundle/manifests/ibp.com_ibpconsoles.yaml new file mode 100644 index 00000000..5423ea88 --- /dev/null +++ b/bundle/manifests/ibp.com_ibpconsoles.yaml @@ -0,0 +1,803 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + creationTimestamp: null + name: ibpconsoles.ibp.com +spec: + group: ibp.com + names: + kind: IBPConsole + listKind: IBPConsoleList + plural: ibpconsoles + singular: ibpconsole + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: The Console is used to deploy and manage the CA, peer, ordering + nodes. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
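Pulling the IBPCA schema above together: only license and version are required, and license.accept only admits the value true. The sketch below is a minimal example assembled from those fields; the name, domain, version string, storage class, and config override are illustrative assumptions rather than values taken from this patch.

apiVersion: ibp.com/v1beta1
kind: IBPCA
metadata:
  name: org1-ca                  # hypothetical name
spec:
  license:
    accept: true                 # the schema only admits the value true
  version: 1.5.3                 # illustrative FabricVersion string
  domain: apps.example.com       # hypothetical sub-domain
  replicas: 1
  storage:
    ca:
      class: standard            # hypothetical storage class
      size: 1Gi
  resources:
    ca:
      requests:
        cpu: 100m                # illustrative values shaped by the limits/requests schema above
        memory: 200Mi
      limits:
        cpu: 100m
        memory: 200Mi
  configoverride:
    ca:
      debug: true                # hypothetical CA server override; this object is passed through
                                 # unvalidated (x-kubernetes-preserve-unknown-fields: true)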
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IBPConsoleSpec defines the desired state of IBPConsole + properties: + action: + description: Action (Optional) is action object for trigerring actions + properties: + restart: + type: boolean + type: object + allowDefaultPassword: + description: AllowDefaultPassword, if true, will bypass the password + reset flow on the first connection to the console GUI. By default + (false), all consoles require a password reset at the first login. + type: boolean + arch: + description: Arch (Optional) is the architecture of the nodes where + console should be deployed + items: + type: string + type: array + authScheme: + description: console settings AuthScheme is auth scheme for console + access + type: string + clusterdata: + description: ClusterData is object cluster data information + properties: + namespace: + type: string + type: + description: Type provides the type of cluster + type: string + zones: + description: Zones provides the zones available + items: + type: string + type: array + type: object + components: + description: Components is database name used for components + type: string + configoverride: + description: ConfigOverride (Optional) is the object to provide overrides + properties: + console: + description: Console is the overrides to console configuration + type: object + x-kubernetes-preserve-unknown-fields: true + deployer: + description: Deployer is the overrides to deployer configuration + type: object + x-kubernetes-preserve-unknown-fields: true + maxnamelength: + description: MaxNameLength (Optional) is the maximum length of + the name that the console can have + type: integer + type: object + configtxlator: + description: ConfigtxlatorURL is url for configtxlator server + type: string + connectionString: + description: ConnectionString is connection url for backend database + type: string + crn: + properties: + account_id: + type: string + c_name: + type: string + c_type: + type: string + instance_id: + type: string + location: + type: string + resource_id: + type: string + resource_type: + type: string + service_name: + type: string + version: + type: string + type: object + deployer: + description: Deployer is object for deployer configs + properties: + components_db: + type: string + connectionstring: + type: string + create_db: + type: boolean + domain: + type: string + type: object + deployerTimeout: + description: DeployerTimeout is timeout value for deployer calls + format: int32 + type: integer + deployerUrl: + description: DeployerURL is url for deployer server + type: string + email: + description: Email is the email used for initial access + type: string + featureflags: + description: FeatureFlags is object for feature flag settings + properties: + capabilities_enabled: + type: boolean + create_channel_enabled: + type: boolean + dev_mode: + type: boolean + enable_ou_identifier: + type: boolean + high_availability: + type: boolean + hsm_enabled: + type: boolean + import_only_enabled: + type: boolean + infra_import_options: + properties: + platform: + type: string + supported_cas: + items: + type: string + type: array + supported_orderers: + items: + type: string + type: array + supported_peers: + items: + type: string + type: array + type: object + lifecycle2_0_enabled: + type: boolean + mustgather_enabled: + type: boolean + patch_1_4to2_x_enabled: + type: boolean + read_only_enabled: + 
type: boolean + remote_peer_config_enabled: + type: boolean + saas_enabled: + type: boolean + scale_raft_nodes_enabled: + type: boolean + templates_enabled: + type: boolean + type: object + iamApiKey: + type: string + ibmid: + properties: + client_id: + type: string + client_secret: + type: string + url: + type: string + type: object + imagePullSecrets: + description: ImagePullSecrets (Optional) is the list of ImagePullSecrets + to be used for console's deployment + items: + type: string + type: array + images: + description: Images (Optional) lists the images to be used for console's + deployment + properties: + configtxlatorImage: + description: ConfigtxlatorImage is the name of the configtxlator + image + type: string + configtxlatorTag: + description: ConfigtxlatorTag is the tag of the configtxlator + image + type: string + consoleImage: + description: ConsoleImage is the name of the console image + type: string + consoleInitImage: + description: ConsoleInitImage is the name of the console init + image + type: string + consoleInitTag: + description: ConsoleInitTag is the tag of the console init image + type: string + consoleTag: + description: ConsoleTag is the tag of the console image + type: string + couchdbImage: + description: CouchDBImage is the name of the couchdb image + type: string + couchdbTag: + description: CouchDBTag is the tag of the couchdb image + type: string + deployerImage: + description: DeployerImage is the name of the deployer image + type: string + deployerTag: + description: DeployerTag is the tag of the deployer image + type: string + mustgatherImage: + description: MustgatherImage is the name of the mustgather image + type: string + mustgatherTag: + description: MustgatherTag is the tag of the mustgatherTag image + type: string + type: object + ingress: + description: Ingress (Optional) is ingress object for ingress overrides + properties: + class: + description: Class (Optional) is the class to set for ingress + type: string + tlsSecretName: + description: TlsSecretName (Optional) is the secret name to be + used for tls certificates + type: string + type: object + kubeconfig: + format: byte + type: string + kubeconfignamespace: + type: string + kubeconfigsecretname: + type: string + license: + description: License should be accepted by the user to be able to + setup console + properties: + accept: + description: Accept should be set to true to accept the license. 
+ enum: + - true + type: boolean + type: object + networkinfo: + description: NetworkInfo is object for network overrides + properties: + configtxlatorPort: + description: ConfigtxlatorPort is the port to access configtxlator + format: int32 + type: integer + consolePort: + description: ConsolePort is the port to access the console + format: int32 + type: integer + domain: + description: Domain for the components + type: string + proxyPort: + description: ProxyPort is the port to access console proxy + format: int32 + type: integer + type: object + password: + description: Password is initial password to access console + type: string + passwordSecretName: + description: PasswordSecretName is secretname where password is stored + type: string + proxying: + type: boolean + region: + description: Region (Optional) is the region of the nodes where the + console should be deployed + type: string + registryURL: + description: RegistryURL is registry url used to pull images + type: string + replicas: + description: Replicas (Optional - default 1) is the number of console + replicas to be setup + format: int32 + type: integer + resources: + description: Resources (Optional) is the amount of resources to be + provided to console deployment + properties: + configtxlator: + description: Configtxlator is the resources provided to the configtxlator + container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + console: + description: Console is the resources provided to the console + container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + couchdb: + description: CouchDB is the resources provided to the couchdb + container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + deployer: + description: Deployer is the resources provided to the deployer + container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + init: + description: Init is the resources provided to the init container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + segmentWriteKey: + type: string + service: + description: Service (Optional) is the override object for console's + service + properties: + type: + description: The "type" of the service to be used + type: string + type: object + serviceAccountName: + description: ServiceAccountName defines serviceaccount used for console + deployment + type: string + sessions: + description: Sessions is sessions database name to use + type: string + storage: + description: Storage (Optional - uses default storageclass if not + provided) is the override object for CA's PVC config + properties: + console: + description: Console is the configuration of the storage of the + console + properties: + class: + description: Class is the storage class + type: string + size: + description: Size of storage + type: string + type: object + type: object + system: + description: System is system database name to use + type: string + systemChannel: + description: SystemChannel is default systemchannel name + type: string + tlsSecretName: + description: TLSSecretName is secret name to load custom tls certs + type: string + usetags: + description: UseTags (Optional) is a flag to switch between image + digests and tags + type: boolean + version: + description: Version (Optional) is version for the console + type: string + versions: + properties: + ca: + additionalProperties: + properties: + default: + type: boolean + image: + description: CAImages is the list of images to be used in + CA deployment + properties: + caImage: + description: CAImage is the name of the CA image + type: string + caInitImage: + description: CAInitImage is the name of the Init image + type: string + caInitTag: + description: CAInitTag is the tag of the Init image + type: string + caTag: + description: CATag is the tag of the CA image + type: string + enrollerImage: + description: EnrollerImage is the name of the init image + for crypto generation + type: string + enrollerTag: + description: EnrollerTag is the tag of the init image + for crypto generation + type: string + hsmImage: + description: HSMImage is the name of the HSM image + type: string + hsmTag: + description: HSMTag is the tag of the HSM image + type: string + type: object + version: + type: string + required: + - default + - version + type: object + type: object + orderer: + additionalProperties: + properties: + default: + type: boolean + image: + description: OrdererImages is the list of images to be used + in orderer deployment + properties: + enrollerImage: + description: EnrollerImage is the name of the init image + for crypto generation + type: string + enrollerTag: + description: EnrollerTag is the tag of the init image + for crypto generation + type: string + grpcwebImage: + description: GRPCWebImage is the name of the grpc web + proxy image + type: string + grpcwebTag: + description: GRPCWebTag is the tag of the grpc web proxy + image + type: string + hsmImage: + description: HSMImage is the name of the hsm image + type: string + hsmTag: + description: HSMTag is the tag of the hsm image + type: string + ordererImage: + description: OrdererImage is the name of the orderer + image + type: string + ordererInitImage: + description: OrdererInitImage is the name of the orderer + init image + type: string + ordererInitTag: + description: OrdererInitTag is the tag of the orderer + init image + type: string + ordererTag: + description: OrdererTag is 
the tag of the orderer image + type: string + type: object + version: + type: string + required: + - default + - version + type: object + type: object + peer: + additionalProperties: + properties: + default: + type: boolean + image: + description: PeerImages is the list of images to be used + in peer deployment + properties: + builderImage: + description: BuilderImage is the name of the builder + image + type: string + builderTag: + description: BuilderTag is the tag of the builder image + type: string + chaincodeLauncherImage: + description: CCLauncherImage is the name of the chaincode + launcher image + type: string + chaincodeLauncherTag: + description: CCLauncherTag is the tag of the chaincode + launcher image + type: string + couchdbImage: + description: CouchDBImage is the name of the couchdb + image + type: string + couchdbTag: + description: CouchDBTag is the tag of the couchdb image + type: string + dindImage: + description: DindImage is the name of the dind image + type: string + dindTag: + description: DindTag is the tag of the dind image + type: string + enrollerImage: + description: EnrollerImage is the name of the init image + for crypto generation + type: string + enrollerTag: + description: EnrollerTag is the tag of the init image + for crypto generation + type: string + fileTransferImage: + description: FileTransferImage is the name of the file + transfer image + type: string + fileTransferTag: + description: FileTransferTag is the tag of the file + transfer image + type: string + fluentdImage: + description: FluentdImage is the name of the fluentd + logger image + type: string + fluentdTag: + description: FluentdTag is the tag of the fluentd logger + image + type: string + goEnvImage: + description: GoEnvImage is the name of the goenv image + type: string + goEnvTag: + description: GoEnvTag is the tag of the goenv image + type: string + grpcwebImage: + description: GRPCWebImage is the name of the grpc web + proxy image + type: string + grpcwebTag: + description: GRPCWebTag is the tag of the grpc web proxy + image + type: string + hsmImage: + description: HSMImage is the name of the hsm image + type: string + hsmTag: + description: HSMTag is the tag of the hsm image + type: string + javaEnvImage: + description: JavaEnvImage is the name of the javaenv + image + type: string + javaEnvTag: + description: JavaEnvTag is the tag of the javaenv image + type: string + nodeEnvImage: + description: NodeEnvImage is the name of the nodeenv + image + type: string + nodeEnvTag: + description: NodeEnvTag is the tag of the nodeenv image + type: string + peerImage: + description: PeerImage is the name of the peer image + type: string + peerInitImage: + description: PeerInitImage is the name of the peer init + image + type: string + peerInitTag: + description: PeerInitTag is the tag of the peer init + image + type: string + peerTag: + description: PeerTag is the tag of the peer image + type: string + type: object + version: + type: string + required: + - default + - version + type: object + type: object + required: + - ca + - orderer + - peer + type: object + zone: + description: Zone (Optional) is the zone of the nodes where the console + should be deployed + type: string + required: + - license + - usetags + - version + type: object + status: + description: Status is the observed state of IBPConsole + properties: + errorcode: + description: ErrorCode is the code of classification of errors + type: integer + lastHeartbeatTime: + description: LastHeartbeatTime is when the controller 
reconciled this + component + type: string + message: + description: Message provides a message for the status to be shown + to customer + type: string + reason: + description: Reason provides a reason for an error + type: string + status: + description: Status is defined based on the current status of the + component + type: string + type: + description: Type is true or false based on if status is valid + type: string + version: + description: Version is the product (IBP) version of the component + type: string + versions: + description: Versions is the operand version of the component + properties: + reconciled: + description: Reconciled provides the reconciled version of the + operand + type: string + required: + - reconciled + type: object + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/bundle/manifests/ibp.com_ibporderers.yaml b/bundle/manifests/ibp.com_ibporderers.yaml new file mode 100644 index 00000000..daa06230 --- /dev/null +++ b/bundle/manifests/ibp.com_ibporderers.yaml @@ -0,0 +1,926 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + creationTimestamp: null + name: ibporderers.ibp.com +spec: + group: ibp.com + names: + kind: IBPOrderer + listKind: IBPOrdererList + plural: ibporderers + singular: ibporderer + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: 'Ordering nodes create the blocks that form the ledger and send + them to peers. Warning: Orderer deployment using this tile is not supported. + Please use the IBP Console to deploy an orderer.' + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
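Against the IBPConsole schema just closed, license, usetags, and version are the required spec fields, so a minimal console resource could look like the sketch below. All values are illustrative assumptions; either passwordSecretName (used here) or an inline password would carry the initial credential.

apiVersion: ibp.com/v1beta1
kind: IBPConsole
metadata:
  name: ibpconsole                 # hypothetical name
spec:
  license:
    accept: true
  usetags: true                    # listed as required by the schema; switches between image digests and tags
  version: 1.0.0                   # illustrative console version
  email: admin@example.com         # hypothetical initial console user
  passwordSecretName: console-pw   # hypothetical Secret holding the initial password
  networkinfo:
    domain: apps.example.com       # hypothetical domain for the components
  storage:
    console:
      class: standard              # hypothetical storage class
      size: 5Gi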
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IBPOrdererSpec defines the desired state of IBPOrderer + properties: + action: + description: Action (Optional) is object for orderer actions + properties: + enroll: + description: Enroll contains actions for triggering crypto enroll + properties: + ecert: + description: Ecert is used to trigger enroll for ecert + type: boolean + tlscert: + description: TLSCert is used to trigger enroll for tls certs + type: boolean + type: object + reenroll: + description: Reenroll contains actions for triggering crypto reenroll + properties: + ecert: + description: Ecert is used to trigger reenroll for ecert + type: boolean + ecertNewKey: + description: EcertNewKey is used to trigger reenroll for ecert + and also generating a new private key + type: boolean + tlscert: + description: TLSCert is used to trigger reenroll for tlscert + type: boolean + tlscertNewKey: + description: TLSCertNewKey is used to trigger reenroll for + tlscert and also generating a new private key + type: boolean + type: object + restart: + description: Restart action is used to restart orderer deployment + type: boolean + type: object + arch: + description: Arch (Optional) is the architecture of the nodes where + orderer should be deployed + items: + type: string + type: array + clusterSize: + description: ClusterSize (Optional) number of orderers if a cluster + type: integer + clusterconfigoverride: + description: ClusterConfigOverride (Optional) is array of config overrides + for cluster + items: + type: object + type: array + x-kubernetes-preserve-unknown-fields: true + clustersecret: + description: ClusterSecret (Optional) is array of msp crypto for cluster + items: + description: SecretSpec defines the crypto spec to pass to components + properties: + enrollment: + description: Enrollment defines enrollment part of secret spec + properties: + clientauth: + description: ClientAuth contains client uath enrollment + details + properties: + admincerts: + description: AdminCerts is the base64 encoded admincerts + items: + type: string + type: array + cahost: + description: CAHost is host part of the CA to use + type: string + caname: + description: CAName is name of CA + type: string + caport: + description: CAPort is port of the CA to use + type: string + catls: + description: CATLS is tls details to talk to CA endpoint + properties: + cacert: + description: CACert is the base64 encoded certificate + type: string + type: object + csr: + description: CSR is the CSR override object + properties: + hosts: + description: Hosts override for CSR + items: + type: string + type: array + type: object + enrollid: + description: EnrollID is the enrollment username + type: string + enrollsecret: + description: EnrollSecret is enrollment secret ( password + ) + type: string + type: object + component: + description: Component contains ecert enrollment details + properties: + admincerts: + description: AdminCerts is the base64 encoded admincerts + items: + type: string + type: array + cahost: + description: CAHost is host part of the CA to use + type: string + caname: + description: CAName is name of CA + type: string + caport: + description: CAPort is port of the CA to use + type: string + catls: + description: CATLS is tls details to talk to CA endpoint + properties: + cacert: + description: CACert is the base64 encoded certificate + type: string + type: object + csr: + 
description: CSR is the CSR override object + properties: + hosts: + description: Hosts override for CSR + items: + type: string + type: array + type: object + enrollid: + description: EnrollID is the enrollment username + type: string + enrollsecret: + description: EnrollSecret is enrollment secret ( password + ) + type: string + type: object + tls: + description: TLS contains tls enrollment details + properties: + admincerts: + description: AdminCerts is the base64 encoded admincerts + items: + type: string + type: array + cahost: + description: CAHost is host part of the CA to use + type: string + caname: + description: CAName is name of CA + type: string + caport: + description: CAPort is port of the CA to use + type: string + catls: + description: CATLS is tls details to talk to CA endpoint + properties: + cacert: + description: CACert is the base64 encoded certificate + type: string + type: object + csr: + description: CSR is the CSR override object + properties: + hosts: + description: Hosts override for CSR + items: + type: string + type: array + type: object + enrollid: + description: EnrollID is the enrollment username + type: string + enrollsecret: + description: EnrollSecret is enrollment secret ( password + ) + type: string + type: object + type: object + msp: + description: MSP defines msp part of secret spec + properties: + clientauth: + description: ClientAuth contains crypto for client auth + certs + properties: + admincerts: + description: AdminCerts is base64 encoded admincerts + array + items: + type: string + type: array + cacerts: + description: CACerts is base64 encoded cacerts array + items: + type: string + type: array + intermediatecerts: + description: IntermediateCerts is base64 encoded intermediate + certs array + items: + type: string + type: array + keystore: + description: KeyStore is base64 encoded private key + type: string + signcerts: + description: SignCerts is base64 encoded sign cert + type: string + type: object + component: + description: Component contains crypto for ecerts + properties: + admincerts: + description: AdminCerts is base64 encoded admincerts + array + items: + type: string + type: array + cacerts: + description: CACerts is base64 encoded cacerts array + items: + type: string + type: array + intermediatecerts: + description: IntermediateCerts is base64 encoded intermediate + certs array + items: + type: string + type: array + keystore: + description: KeyStore is base64 encoded private key + type: string + signcerts: + description: SignCerts is base64 encoded sign cert + type: string + type: object + tls: + description: TLS contains crypto for tls certs + properties: + admincerts: + description: AdminCerts is base64 encoded admincerts + array + items: + type: string + type: array + cacerts: + description: CACerts is base64 encoded cacerts array + items: + type: string + type: array + intermediatecerts: + description: IntermediateCerts is base64 encoded intermediate + certs array + items: + type: string + type: array + keystore: + description: KeyStore is base64 encoded private key + type: string + signcerts: + description: SignCerts is base64 encoded sign cert + type: string + type: object + type: object + type: object + type: array + configoverride: + description: ConfigOverride (Optional) is the object to provide overrides + to core yaml config + type: object + x-kubernetes-preserve-unknown-fields: true + customNames: + description: CustomNames (Optional) is to use pre-configured resources + for orderer's deployment + properties: + 
pvc: + description: PVC is the list of PVC Names to be used for orderer's + deployment + properties: + orderer: + description: Orderer is the pvc to be used as orderer's storage + type: string + type: object + type: object + disablenodeou: + description: DisableNodeOU (Optional) is used to switch nodeou on + and off + type: boolean + domain: + description: Domain is the sub-domain used for orderer's deployment + type: string + externalAddress: + description: ExternalAddress (Optional) is used internally + type: string + genesisBlock: + description: GenesisBlock (Optional) is genesis block to start the + orderer + type: string + genesisProfile: + type: string + hsm: + description: HSM (Optional) is DEPRECATED + properties: + pkcs11endpoint: + description: PKCS11Endpoint is DEPRECATED + type: string + type: object + imagePullSecrets: + description: ImagePullSecrets (Optional) is the list of ImagePullSecrets + to be used for orderer's deployment + items: + type: string + type: array + images: + description: Images (Optional) lists the images to be used for orderer's + deployment + properties: + enrollerImage: + description: EnrollerImage is the name of the init image for crypto + generation + type: string + enrollerTag: + description: EnrollerTag is the tag of the init image for crypto + generation + type: string + grpcwebImage: + description: GRPCWebImage is the name of the grpc web proxy image + type: string + grpcwebTag: + description: GRPCWebTag is the tag of the grpc web proxy image + type: string + hsmImage: + description: HSMImage is the name of the hsm image + type: string + hsmTag: + description: HSMTag is the tag of the hsm image + type: string + ordererImage: + description: OrdererImage is the name of the orderer image + type: string + ordererInitImage: + description: OrdererInitImage is the name of the orderer init + image + type: string + ordererInitTag: + description: OrdererInitTag is the tag of the orderer init image + type: string + ordererTag: + description: OrdererTag is the tag of the orderer image + type: string + type: object + ingress: + description: Ingress (Optional) is ingress object for ingress overrides + properties: + class: + description: Class (Optional) is the class to set for ingress + type: string + tlsSecretName: + description: TlsSecretName (Optional) is the secret name to be + used for tls certificates + type: string + type: object + isprecreate: + description: IsPrecreate (Optional) defines if orderer is in precreate + state + type: boolean + license: + description: License should be accepted by the user to be able to + setup orderer + properties: + accept: + description: Accept should be set to true to accept the license. + enum: + - true + type: boolean + type: object + location: + description: ClusterLocation (Optional) is array of cluster location + settings for cluster + items: + description: IBPOrdererClusterLocation (Optional) is object of cluster + location settings for cluster + properties: + region: + description: Region (Optional) is the region of the nodes where + the orderer should be deployed + type: string + zone: + description: Zone (Optional) is the zone of the nodes where + the orderer should be deployed + type: string + type: object + type: array + mspID: + description: MSPID is the msp id of the orderer + type: string + numSecondsWarningPeriod: + description: NumSecondsWarningPeriod (Optional - default 30 days) + is used to define certificate expiry warning period. 
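+              # A minimal sketch (illustrative only, not part of the generated schema):
+              # the field is expressed in seconds, so the stated 30-day default would
+              # correspond to a spec entry such as
+              #
+              #   numSecondsWarningPeriod: 2592000   # 30 * 24 * 60 * 60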
+ format: int64 + type: integer + number: + description: NodeNumber (Optional) is the number of this node in cluster + - used internally + type: integer + ordererType: + description: OrdererType is type of orderer you want to start + type: string + orgName: + description: OrgName is the organization name of the orderer + type: string + region: + description: Region (Optional) is the region of the nodes where the + orderer should be deployed + type: string + registryURL: + description: RegistryURL is registry url used to pull images + type: string + replicas: + description: Replicas (Optional - default 1) is the number of orderer + replicas to be setup + format: int32 + type: integer + resources: + description: Resources (Optional) is the amount of resources to be + provided to orderer deployment + properties: + enroller: + description: Enroller (Optional) is the resources provided to + the enroller container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + hsmdaemon: + description: HSMDaemon (Optional) is the resources provided to + the HSM Daemon container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + init: + description: Init (Optional) is the resources provided to the + init container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + orderer: + description: Orderer (Optional) is the resources provided to the + orderer container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + proxy: + description: GRPCProxy (Optional) is the resources provided to + the proxy container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + secret: + description: Secret is object for msp crypto + properties: + enrollment: + description: Enrollment defines enrollment part of secret spec + properties: + clientauth: + description: ClientAuth contains client uath enrollment details + properties: + admincerts: + description: AdminCerts is the base64 encoded admincerts + items: + type: string + type: array + cahost: + description: CAHost is host part of the CA to use + type: string + caname: + description: CAName is name of CA + type: string + caport: + description: CAPort is port of the CA to use + type: string + catls: + description: CATLS is tls details to talk to CA endpoint + properties: + cacert: + description: CACert is the base64 encoded certificate + type: string + type: object + csr: + description: CSR is the CSR override object + properties: + hosts: + description: Hosts override for CSR + items: + type: string + type: array + type: object + enrollid: + description: EnrollID is the enrollment username + type: string + enrollsecret: + description: EnrollSecret is enrollment secret ( password + ) + type: string + type: object + component: + description: Component contains ecert enrollment details + properties: + admincerts: + description: AdminCerts is the base64 encoded admincerts + items: + type: string + type: array + cahost: + description: CAHost is host part of the CA to use + type: string + caname: + description: CAName is name of CA + type: string + caport: + description: CAPort is port of the CA to use + type: string + catls: + description: CATLS is tls details to talk to CA endpoint + properties: + cacert: + description: CACert is the base64 encoded certificate + type: string + type: object + csr: + description: CSR is the CSR override object + properties: + hosts: + description: Hosts override for CSR + items: + type: string + type: array + type: object + enrollid: + description: EnrollID is the enrollment username + type: string + enrollsecret: + description: EnrollSecret is enrollment secret ( password + ) + type: string + type: object + tls: + description: TLS contains tls enrollment details + properties: + admincerts: + description: AdminCerts is the base64 encoded admincerts + items: + type: string + type: array + cahost: + description: CAHost is host part of the CA to use + type: string + caname: + description: CAName is name of CA + type: string + caport: + description: CAPort is port of the CA to use + type: string + catls: + description: CATLS is tls details to talk to CA endpoint + properties: + cacert: + description: CACert is the base64 encoded certificate + type: string + type: object + csr: + description: CSR is the CSR override object + properties: + hosts: + description: Hosts override for CSR + items: + type: string + type: array + type: object + enrollid: + description: EnrollID is the enrollment username + type: string + enrollsecret: + description: EnrollSecret is enrollment secret ( password + ) + type: string + type: object + type: object + msp: + description: MSP defines msp part of secret spec + properties: + clientauth: + description: ClientAuth contains crypto for client auth certs + properties: + admincerts: + description: AdminCerts is base64 encoded admincerts array + items: + type: string + type: array + cacerts: + description: CACerts is base64 encoded cacerts array + items: + type: string + type: array + intermediatecerts: + description: 
IntermediateCerts is base64 encoded intermediate + certs array + items: + type: string + type: array + keystore: + description: KeyStore is base64 encoded private key + type: string + signcerts: + description: SignCerts is base64 encoded sign cert + type: string + type: object + component: + description: Component contains crypto for ecerts + properties: + admincerts: + description: AdminCerts is base64 encoded admincerts array + items: + type: string + type: array + cacerts: + description: CACerts is base64 encoded cacerts array + items: + type: string + type: array + intermediatecerts: + description: IntermediateCerts is base64 encoded intermediate + certs array + items: + type: string + type: array + keystore: + description: KeyStore is base64 encoded private key + type: string + signcerts: + description: SignCerts is base64 encoded sign cert + type: string + type: object + tls: + description: TLS contains crypto for tls certs + properties: + admincerts: + description: AdminCerts is base64 encoded admincerts array + items: + type: string + type: array + cacerts: + description: CACerts is base64 encoded cacerts array + items: + type: string + type: array + intermediatecerts: + description: IntermediateCerts is base64 encoded intermediate + certs array + items: + type: string + type: array + keystore: + description: KeyStore is base64 encoded private key + type: string + signcerts: + description: SignCerts is base64 encoded sign cert + type: string + type: object + type: object + type: object + service: + description: Service (Optional) is the override object for orderer's + service + properties: + type: + description: The "type" of the service to be used + type: string + type: object + storage: + description: Storage (Optional - uses default storageclass if not + provided) is the override object for CA's PVC config + properties: + orderer: + description: Orderer (Optional) is the configuration of the storage + of the orderer + properties: + class: + description: Class is the storage class + type: string + size: + description: Size of storage + type: string + type: object + type: object + systemChannelName: + description: SystemChannelName is the name of systemchannel + type: string + useChannelLess: + type: boolean + version: + description: FabricVersion (Optional) is fabric version for the orderer + type: string + zone: + description: Zone (Optional) is the zone of the nodes where the orderer + should be deployed + type: string + required: + - license + - version + type: object + status: + description: IBPOrdererStatus defines the observed state of IBPOrderer + properties: + errorcode: + description: ErrorCode is the code of classification of errors + type: integer + lastHeartbeatTime: + description: LastHeartbeatTime is when the controller reconciled this + component + type: string + message: + description: Message provides a message for the status to be shown + to customer + type: string + reason: + description: Reason provides a reason for an error + type: string + status: + description: Status is defined based on the current status of the + component + type: string + type: + description: Type is true or false based on if status is valid + type: string + version: + description: Version is the product (IBP) version of the component + type: string + versions: + description: Versions is the operand version of the component + properties: + reconciled: + description: Reconciled provides the reconciled version of the + operand + type: string + required: + - reconciled + type: object + 
type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/bundle/manifests/ibp.com_ibppeers.yaml b/bundle/manifests/ibp.com_ibppeers.yaml new file mode 100644 index 00000000..37a8b968 --- /dev/null +++ b/bundle/manifests/ibp.com_ibppeers.yaml @@ -0,0 +1,862 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + creationTimestamp: null + name: ibppeers.ibp.com +spec: + group: ibp.com + names: + kind: IBPPeer + listKind: IBPPeerList + plural: ibppeers + singular: ibppeer + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: 'IBPPeer is the Schema for the ibppeers API. Warning: Peer deployment + using this tile is not supported. Please use the IBP Console to deploy a + Peer.' + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IBPPeerSpec defines the desired state of IBPPeer + properties: + action: + description: Action (Optional) is object for peer actions + properties: + enroll: + description: Enroll contains actions for triggering crypto enroll + properties: + ecert: + description: Ecert is used to trigger enroll for ecert + type: boolean + tlscert: + description: TLSCert is used to trigger enroll for tlscert + type: boolean + type: object + reenroll: + description: Reenroll contains actions for triggering crypto reenroll + properties: + ecert: + description: Ecert is used to trigger reenroll for ecert + type: boolean + ecertNewKey: + description: EcertNewKey is used to trigger reenroll for ecert + and also generating a new private key + type: boolean + tlscert: + description: TLSCert is used to trigger reenroll for tlscert + type: boolean + tlscertNewKey: + description: TLSCertNewKey is used to trigger reenroll for + tlscert and also generating a new private key + type: boolean + type: object + restart: + description: Restart action is used to restart peer deployment + type: boolean + upgradedbs: + description: UpgradeDBs action is used to trigger peer node upgrade-dbs + command + type: boolean + type: object + arch: + description: cluster related configs Arch (Optional) is the architecture + of the nodes where peer should be deployed + items: + type: string + type: array + chaincodeBuilderConfig: + additionalProperties: + type: string + description: ChaincodeBuilderConfig (Optional) is a k/v map providing + a scope for template substitutions defined in chaincode-as-a-service + package metadata files. The map will be serialized as JSON and set + in the peer deployment CHAINCODE_AS_A_SERVICE_BUILDER_CONFIG env + variable. 
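+              # A minimal sketch (the key and value below are hypothetical, not defined
+              # by this CRD): a map such as
+              #
+              #   chaincodeBuilderConfig:
+              #     peername: org1-peer1
+              #
+              # would, per the description above, be serialized to
+              # {"peername":"org1-peer1"} and set as the peer deployment's
+              # CHAINCODE_AS_A_SERVICE_BUILDER_CONFIG environment variable.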
+ type: object + configoverride: + description: ConfigOverride (Optional) is the object to provide overrides + to core yaml config + type: object + x-kubernetes-preserve-unknown-fields: true + customNames: + description: CustomNames (Optional) is to use pre-configured resources + for peer's deployment + properties: + pvc: + description: PVC is the list of PVC Names to be used for peer's + deployment + properties: + peer: + description: Peer is the pvc to be used as peer's storage + type: string + statedb: + description: StateDB is the pvc to be used as statedb's storage + type: string + type: object + type: object + dindArgs: + description: advanced configs DindArgs (Optional) is used to override + args passed to dind container + items: + type: string + type: array + disablenodeou: + description: DisableNodeOU (Optional) is used to switch nodeou on + and off + type: boolean + domain: + description: proxy ip passed if not OCP, domain for OCP Domain is + the sub-domain used for peer's deployment + type: string + hsm: + description: HSM (Optional) is DEPRECATED + properties: + pkcs11endpoint: + description: PKCS11Endpoint is DEPRECATED + type: string + type: object + imagePullSecrets: + description: ImagePullSecrets (Optional) is the list of ImagePullSecrets + to be used for peer's deployment + items: + type: string + type: array + images: + description: Images (Optional) lists the images to be used for peer's + deployment + properties: + builderImage: + description: BuilderImage is the name of the builder image + type: string + builderTag: + description: BuilderTag is the tag of the builder image + type: string + chaincodeLauncherImage: + description: CCLauncherImage is the name of the chaincode launcher + image + type: string + chaincodeLauncherTag: + description: CCLauncherTag is the tag of the chaincode launcher + image + type: string + couchdbImage: + description: CouchDBImage is the name of the couchdb image + type: string + couchdbTag: + description: CouchDBTag is the tag of the couchdb image + type: string + dindImage: + description: DindImage is the name of the dind image + type: string + dindTag: + description: DindTag is the tag of the dind image + type: string + enrollerImage: + description: EnrollerImage is the name of the init image for crypto + generation + type: string + enrollerTag: + description: EnrollerTag is the tag of the init image for crypto + generation + type: string + fileTransferImage: + description: FileTransferImage is the name of the file transfer + image + type: string + fileTransferTag: + description: FileTransferTag is the tag of the file transfer image + type: string + fluentdImage: + description: FluentdImage is the name of the fluentd logger image + type: string + fluentdTag: + description: FluentdTag is the tag of the fluentd logger image + type: string + goEnvImage: + description: GoEnvImage is the name of the goenv image + type: string + goEnvTag: + description: GoEnvTag is the tag of the goenv image + type: string + grpcwebImage: + description: GRPCWebImage is the name of the grpc web proxy image + type: string + grpcwebTag: + description: GRPCWebTag is the tag of the grpc web proxy image + type: string + hsmImage: + description: HSMImage is the name of the hsm image + type: string + hsmTag: + description: HSMTag is the tag of the hsm image + type: string + javaEnvImage: + description: JavaEnvImage is the name of the javaenv image + type: string + javaEnvTag: + description: JavaEnvTag is the tag of the javaenv image + type: string + nodeEnvImage: + 
description: NodeEnvImage is the name of the nodeenv image + type: string + nodeEnvTag: + description: NodeEnvTag is the tag of the nodeenv image + type: string + peerImage: + description: PeerImage is the name of the peer image + type: string + peerInitImage: + description: PeerInitImage is the name of the peer init image + type: string + peerInitTag: + description: PeerInitTag is the tag of the peer init image + type: string + peerTag: + description: PeerTag is the tag of the peer image + type: string + type: object + ingress: + description: Ingress (Optional) is ingress object for ingress overrides + properties: + class: + description: Class (Optional) is the class to set for ingress + type: string + tlsSecretName: + description: TlsSecretName (Optional) is the secret name to be + used for tls certificates + type: string + type: object + license: + description: License should be accepted by the user to be able to + setup Peer + properties: + accept: + description: Accept should be set to true to accept the license. + enum: + - true + type: boolean + type: object + mspID: + description: peer specific configs MSPID is the msp id of the peer + type: string + mspSecret: + description: msp data can be passed in secret on in spec MSPSecret + (Optional) is secret used to store msp crypto + type: string + numSecondsWarningPeriod: + description: NumSecondsWarningPeriod (Optional - default 30 days) + is used to define certificate expiry warning period. + format: int64 + type: integer + peerExternalEndpoint: + description: PeerExternalEndpoint (Optional) is used to override peer + external endpoint + type: string + region: + description: Region (Optional) is the region of the nodes where the + peer should be deployed + type: string + registryURL: + description: RegistryURL is registry url used to pull images + type: string + replicas: + description: Replicas (Optional - default 1) is the number of peer + replicas to be setup + format: int32 + type: integer + resources: + description: Resources (Optional) is the amount of resources to be + provided to peer deployment + properties: + chaincodelauncher: + description: CCLauncher (Optional) is the resources provided to + the cclauncher container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + couchdb: + description: CouchDB (Optional) is the resources provided to the + couchdb container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + dind: + description: DinD (Optional) is the resources provided to the + dind container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + enroller: + description: Enroller (Optional) is the resources provided to + the enroller container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + fluentd: + description: FluentD (Optional) is the resources provided to the + fluentd container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + hsmdaemon: + description: HSMDaemon (Optional) is the resources provided to + the HSM Daemon container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + init: + description: Init (Optional) is the resources provided to the + init container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + peer: + description: / Peer (Optional) is the resources provided to the + peer container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + proxy: + description: GRPCProxy (Optional) is the resources provided to + the proxy container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + secret: + description: Secret is object for msp crypto + properties: + enrollment: + description: Enrollment defines enrollment part of secret spec + properties: + clientauth: + description: ClientAuth contains client uath enrollment details + properties: + admincerts: + description: AdminCerts is the base64 encoded admincerts + items: + type: string + type: array + cahost: + description: CAHost is host part of the CA to use + type: string + caname: + description: CAName is name of CA + type: string + caport: + description: CAPort is port of the CA to use + type: string + catls: + description: CATLS is tls details to talk to CA endpoint + properties: + cacert: + description: CACert is the base64 encoded certificate + type: string + type: object + csr: + description: CSR is the CSR override object + properties: + hosts: + description: Hosts override for CSR + items: + type: string + type: array + type: object + enrollid: + description: EnrollID is the enrollment username + type: string + enrollsecret: + description: EnrollSecret is enrollment secret ( password + ) + type: string + type: object + component: + description: Component contains ecert enrollment details + properties: + admincerts: + description: AdminCerts is the base64 encoded admincerts + items: + type: string + type: array + cahost: + description: CAHost is host part of the CA to use + type: string + caname: + description: CAName is name of CA + type: string + caport: + description: CAPort is port of the CA to use + type: string + catls: + description: CATLS is tls details to talk to CA endpoint + properties: + cacert: + description: CACert is the base64 encoded certificate + type: string + type: object + csr: + description: CSR is the CSR override object + properties: + hosts: + description: Hosts override for CSR + items: + type: string + type: array + type: object + enrollid: + description: EnrollID is the enrollment username + type: string + enrollsecret: + description: EnrollSecret is enrollment secret ( password + ) + type: string + type: object + tls: + description: TLS contains tls enrollment details + properties: + admincerts: + description: AdminCerts is the base64 encoded admincerts + items: + type: string + type: array + cahost: + description: CAHost is host part of the CA to use + type: string + caname: + description: CAName is name of CA + type: string + caport: + description: CAPort is port of the CA to use + type: string + catls: + description: CATLS is tls details to talk to CA endpoint + properties: + cacert: + description: CACert is the base64 encoded certificate + type: string + type: object + csr: + description: CSR is the CSR override object + properties: + hosts: + description: Hosts override for CSR + items: + type: string + type: array + type: object + enrollid: + description: EnrollID is the enrollment username + type: string + enrollsecret: + description: EnrollSecret is enrollment secret ( password + ) + type: string + type: object + type: object + msp: + description: MSP defines msp part of secret spec + properties: + clientauth: + description: ClientAuth contains crypto for client auth certs + properties: + admincerts: + description: AdminCerts is base64 encoded admincerts array + items: + type: string + type: array + cacerts: + description: CACerts is base64 encoded cacerts array + items: + type: string + type: array + intermediatecerts: + description: 
IntermediateCerts is base64 encoded intermediate + certs array + items: + type: string + type: array + keystore: + description: KeyStore is base64 encoded private key + type: string + signcerts: + description: SignCerts is base64 encoded sign cert + type: string + type: object + component: + description: Component contains crypto for ecerts + properties: + admincerts: + description: AdminCerts is base64 encoded admincerts array + items: + type: string + type: array + cacerts: + description: CACerts is base64 encoded cacerts array + items: + type: string + type: array + intermediatecerts: + description: IntermediateCerts is base64 encoded intermediate + certs array + items: + type: string + type: array + keystore: + description: KeyStore is base64 encoded private key + type: string + signcerts: + description: SignCerts is base64 encoded sign cert + type: string + type: object + tls: + description: TLS contains crypto for tls certs + properties: + admincerts: + description: AdminCerts is base64 encoded admincerts array + items: + type: string + type: array + cacerts: + description: CACerts is base64 encoded cacerts array + items: + type: string + type: array + intermediatecerts: + description: IntermediateCerts is base64 encoded intermediate + certs array + items: + type: string + type: array + keystore: + description: KeyStore is base64 encoded private key + type: string + signcerts: + description: SignCerts is base64 encoded sign cert + type: string + type: object + type: object + type: object + service: + description: Service (Optional) is the override object for peer's + service + properties: + type: + description: The "type" of the service to be used + type: string + type: object + stateDb: + description: StateDb (Optional) is the statedb used for peer, can + be couchdb or leveldb + type: string + storage: + description: Storage (Optional - uses default storageclass if not + provided) is the override object for peer's PVC config + properties: + peer: + description: Peer (Optional) is the configuration of the storage + of the peer + properties: + class: + description: Class is the storage class + type: string + size: + description: Size of storage + type: string + type: object + statedb: + description: StateDB (Optional) is the configuration of the storage + of the statedb + properties: + class: + description: Class is the storage class + type: string + size: + description: Size of storage + type: string + type: object + type: object + version: + description: FabricVersion (Optional) is fabric version for the peer + type: string + zone: + description: Zone (Optional) is the zone of the nodes where the peer + should be deployed + type: string + required: + - license + - version + type: object + status: + description: IBPPeerStatus defines the observed state of IBPPeer + properties: + errorcode: + description: ErrorCode is the code of classification of errors + type: integer + lastHeartbeatTime: + description: LastHeartbeatTime is when the controller reconciled this + component + type: string + message: + description: Message provides a message for the status to be shown + to customer + type: string + reason: + description: Reason provides a reason for an error + type: string + status: + description: Status is defined based on the current status of the + component + type: string + type: + description: Type is true or false based on if status is valid + type: string + version: + description: Version is the product (IBP) version of the component + type: string + versions: + description: 
Versions is the operand version of the component + properties: + reconciled: + description: Reconciled provides the reconciled version of the + operand + type: string + required: + - reconciled + type: object + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/bundle/manifests/operator-controller-manager-metrics-service_v1_service.yaml b/bundle/manifests/operator-controller-manager-metrics-service_v1_service.yaml new file mode 100644 index 00000000..4d9c57f5 --- /dev/null +++ b/bundle/manifests/operator-controller-manager-metrics-service_v1_service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + creationTimestamp: null + labels: + control-plane: controller-manager + name: operator-controller-manager-metrics-service +spec: + ports: + - name: https + port: 8443 + targetPort: https + selector: + control-plane: controller-manager +status: + loadBalancer: {} diff --git a/bundle/manifests/operator-ibm-hlfsupport_rbac.authorization.k8s.io_v1_clusterrolebinding.yaml b/bundle/manifests/operator-ibm-hlfsupport_rbac.authorization.k8s.io_v1_clusterrolebinding.yaml new file mode 100644 index 00000000..8c8d3c68 --- /dev/null +++ b/bundle/manifests/operator-ibm-hlfsupport_rbac.authorization.k8s.io_v1_clusterrolebinding.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + name: operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator +subjects: + - kind: ServiceAccount + name: operator + namespace: placeholder diff --git a/bundle/manifests/operator-leader-election-role_rbac.authorization.k8s.io_v1_role.yaml b/bundle/manifests/operator-leader-election-role_rbac.authorization.k8s.io_v1_role.yaml new file mode 100644 index 00000000..ebf06dea --- /dev/null +++ b/bundle/manifests/operator-leader-election-role_rbac.authorization.k8s.io_v1_role.yaml @@ -0,0 +1,39 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + creationTimestamp: null + name: operator-leader-election-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - configmaps/status + verbs: + - get + - update + - patch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get diff --git a/bundle/manifests/operator-leader-election-rolebinding_rbac.authorization.k8s.io_v1_rolebinding.yaml b/bundle/manifests/operator-leader-election-rolebinding_rbac.authorization.k8s.io_v1_rolebinding.yaml new file mode 100644 index 00000000..2d5af2d3 --- /dev/null +++ b/bundle/manifests/operator-leader-election-rolebinding_rbac.authorization.k8s.io_v1_rolebinding.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + creationTimestamp: null + name: operator-leader-election-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: operator-leader-election-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/bundle/manifests/operator-manager-role_rbac.authorization.k8s.io_v1_clusterrole.yaml b/bundle/manifests/operator-manager-role_rbac.authorization.k8s.io_v1_clusterrole.yaml new file mode 100644 index 00000000..39f274e9 --- /dev/null +++ 
b/bundle/manifests/operator-manager-role_rbac.authorization.k8s.io_v1_clusterrole.yaml @@ -0,0 +1,187 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: operator-manager-role +rules: +- apiGroups: + - apiextensions.k8s.io + resources: + - persistentvolumeclaims + - persistentvolumes + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get +- apiGroups: + - route.openshift.io + resources: + - routes + - routes/custom-host + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection +- apiGroups: + - "" + resources: + - pods + - pods/log + - persistentvolumeclaims + - persistentvolumes + - services + - endpoints + - events + - configmaps + - secrets + - nodes + - serviceaccounts + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection +- apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection +- apiGroups: + - authorization.openshift.io + - rbac.authorization.k8s.io + resources: + - roles + - rolebindings + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection + - bind + - escalate +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get +- apiGroups: + - apps + resources: + - deployments + - daemonsets + - replicasets + - statefulsets + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection +- apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - get + - create +- apiGroups: + - apps + resourceNames: + - operator + resources: + - deployments/finalizers + verbs: + - update +- apiGroups: + - ibp.com + resources: + - ibpcas.ibp.com + - ibppeers.ibp.com + - ibporderers.ibp.com + - ibpconsoles.ibp.com + - ibpcas + - ibppeers + - ibporderers + - ibpconsoles + - ibpcas/finalizers + - ibppeers/finalizers + - ibporderers/finalizers + - ibpconsoles/finalizers + - ibpcas/status + - ibppeers/status + - ibporderers/status + - ibpconsoles/status + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection +- apiGroups: + - extensions + - networking.k8s.io + - config.openshift.io + resources: + - ingresses + - networkpolicies + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch diff --git a/bundle/manifests/operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml b/bundle/manifests/operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml new file mode 100644 index 00000000..298cb959 --- /dev/null +++ b/bundle/manifests/operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml @@ -0,0 +1,10 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: operator-metrics-reader +rules: +- nonResourceURLs: + - /metrics + verbs: + - get diff --git a/bundle/manifests/operator-metrics-reader_rbac.authorization.k8s.io_v1beta1_clusterrole.yaml b/bundle/manifests/operator-metrics-reader_rbac.authorization.k8s.io_v1beta1_clusterrole.yaml new file mode 100644 index 00000000..58921ed1 --- /dev/null +++ 
b/bundle/manifests/operator-metrics-reader_rbac.authorization.k8s.io_v1beta1_clusterrole.yaml @@ -0,0 +1,10 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: operator-metrics-reader +rules: + - nonResourceURLs: + - /metrics + verbs: + - get diff --git a/bundle/manifests/operator-operator_rbac.authorization.k8s.io_v1_clusterrolebinding.yaml b/bundle/manifests/operator-operator_rbac.authorization.k8s.io_v1_clusterrolebinding.yaml new file mode 100644 index 00000000..6c1a045c --- /dev/null +++ b/bundle/manifests/operator-operator_rbac.authorization.k8s.io_v1_clusterrolebinding.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + name: operator-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator +subjects: +- kind: ServiceAccount + name: operator + namespace: placeholder diff --git a/bundle/manifests/operator-proxy-role_rbac.authorization.k8s.io_v1_clusterrole.yaml b/bundle/manifests/operator-proxy-role_rbac.authorization.k8s.io_v1_clusterrole.yaml new file mode 100644 index 00000000..4e50e3d9 --- /dev/null +++ b/bundle/manifests/operator-proxy-role_rbac.authorization.k8s.io_v1_clusterrole.yaml @@ -0,0 +1,18 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: operator-proxy-role +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create diff --git a/bundle/manifests/operator-proxy-rolebinding_rbac.authorization.k8s.io_v1_clusterrolebinding.yaml b/bundle/manifests/operator-proxy-rolebinding_rbac.authorization.k8s.io_v1_clusterrolebinding.yaml new file mode 100644 index 00000000..2c1cb45e --- /dev/null +++ b/bundle/manifests/operator-proxy-rolebinding_rbac.authorization.k8s.io_v1_clusterrolebinding.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + name: operator-proxy-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-proxy-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/bundle/metadata/annotations.yaml b/bundle/metadata/annotations.yaml new file mode 100644 index 00000000..5e677aaa --- /dev/null +++ b/bundle/metadata/annotations.yaml @@ -0,0 +1,14 @@ +annotations: + # Core bundle annotations. + operators.operatorframework.io.bundle.mediatype.v1: registry+v1 + operators.operatorframework.io.bundle.manifests.v1: manifests/ + operators.operatorframework.io.bundle.metadata.v1: metadata/ + operators.operatorframework.io.bundle.package.v1: fabric-opensource-operator + operators.operatorframework.io.bundle.channels.v1: alpha + operators.operatorframework.io.metrics.builder: operator-sdk-v1.19.0+git + operators.operatorframework.io.metrics.mediatype.v1: metrics+v1 + operators.operatorframework.io.metrics.project_layout: go.kubebuilder.io/v3 + + # Annotations for testing. 
+ operators.operatorframework.io.test.mediatype.v1: scorecard+v1 + operators.operatorframework.io.test.config.v1: tests/scorecard/ diff --git a/cmd/crd/main.go b/cmd/crd/main.go new file mode 100644 index 00000000..7b6cf1b6 --- /dev/null +++ b/cmd/crd/main.go @@ -0,0 +1,38 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "fmt" + "os" + "time" + + "github.com/IBM-Blockchain/fabric-operator/pkg/command" + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" +) + +func main() { + crdsPath := "../../config/crd/bases" + err := command.CRDInstall(crdsPath) + if err != nil { + fmt.Printf("failed to create crds: %s\n", err) + time.Sleep(15 * time.Second) + os.Exit(1) + } +} diff --git a/config/certmanager/certificate.yaml b/config/certmanager/certificate.yaml new file mode 100644 index 00000000..58db114f --- /dev/null +++ b/config/certmanager/certificate.yaml @@ -0,0 +1,26 @@ +# The following manifests contain a self-signed issuer CR and a certificate CR. +# More document can be found at https://docs.cert-manager.io +# WARNING: Targets CertManager 0.11 check https://docs.cert-manager.io/en/latest/tasks/upgrading/index.html for +# breaking changes +apiVersion: cert-manager.io/v1alpha2 +kind: Issuer +metadata: + name: selfsigned-issuer + namespace: system +spec: + selfSigned: {} +--- +apiVersion: cert-manager.io/v1alpha2 +kind: Certificate +metadata: + name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml + namespace: system +spec: + # $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize + dnsNames: + - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc + - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local + issuerRef: + kind: Issuer + name: selfsigned-issuer + secretName: webhook-server-cert # this secret will not be prefixed, since it's not managed by kustomize diff --git a/config/certmanager/kustomization.yaml b/config/certmanager/kustomization.yaml new file mode 100644 index 00000000..bebea5a5 --- /dev/null +++ b/config/certmanager/kustomization.yaml @@ -0,0 +1,5 @@ +resources: +- certificate.yaml + +configurations: +- kustomizeconfig.yaml diff --git a/config/certmanager/kustomizeconfig.yaml b/config/certmanager/kustomizeconfig.yaml new file mode 100644 index 00000000..90d7c313 --- /dev/null +++ b/config/certmanager/kustomizeconfig.yaml @@ -0,0 +1,16 @@ +# This configuration is for teaching kustomize how to update name ref and var substitution +nameReference: +- kind: Issuer + group: cert-manager.io + fieldSpecs: + - kind: Certificate + group: cert-manager.io + path: spec/issuerRef/name + +varReference: +- kind: Certificate + group: cert-manager.io + path: spec/commonName +- kind: Certificate + group: cert-manager.io + path: spec/dnsNames diff --git a/config/crd/bases/ibp.com_ibpcas.yaml b/config/crd/bases/ibp.com_ibpcas.yaml new file mode 100644 index 00000000..f8850b2f --- /dev/null +++ 
b/config/crd/bases/ibp.com_ibpcas.yaml @@ -0,0 +1,389 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + creationTimestamp: null + name: ibpcas.ibp.com +spec: + group: ibp.com + names: + kind: IBPCA + listKind: IBPCAList + plural: ibpcas + singular: ibpca + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: 'Certificate Authorities issue certificates for all the identities + to transact on the network. Warning: CA deployment using this tile is not + supported. Please use the IBP Console to deploy a CA.' + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IBPCASpec defines the desired state of IBP CA + properties: + action: + description: Action (Optional) is action object for trigerring actions + properties: + renew: + description: Renew action is object for certificate renewals + properties: + tlscert: + description: TLSCert action is used to renew TLS crypto for + CA server + type: boolean + type: object + restart: + description: Restart action is used to restart the running CA + type: boolean + type: object + arch: + description: Arch (Optional) is the architecture of the nodes where + CA should be deployed + items: + type: string + type: array + configoverride: + description: ConfigOverride (Optional) is the object to provide overrides + to CA & TLSCA config + properties: + ca: + description: CA (Optional) is the overrides to CA's configuration + type: object + x-kubernetes-preserve-unknown-fields: true + maxnamelength: + description: MaxNameLength (Optional) is the maximum length of + the name that the CA can have + type: integer + tlsca: + description: TLSCA (Optional) is the overrides to TLSCA's configuration + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + customNames: + description: CustomNames (Optional) is to use pre-configured resources + for CA's deployment + properties: + pvc: + description: PVC is the list of PVC Names to be used for CA's + deployment + properties: + ca: + description: CA is the pvc to be used as CA's storage + type: string + type: object + sqlitepath: + description: Sqlite is the sqlite path to be used for CA's deployment + type: string + type: object + domain: + description: Domain is the sub-domain used for CA's deployment + type: string + hsm: + description: HSM (Optional) is DEPRECATED + properties: + pkcs11endpoint: + description: PKCS11Endpoint is DEPRECATED + type: string + type: object + imagePullSecrets: + description: ImagePullSecrets (Optional) is the list of ImagePullSecrets + to be used for CA's deployment + items: + type: string + type: array + images: + description: Images (Optional) lists the images to be used for CA's + deployment + properties: + caImage: + description: CAImage is the name of the CA image + type: string + caInitImage: + description: CAInitImage is the name of the Init image + type: string + caInitTag: + description: CAInitTag is the tag of the Init image + type: string + caTag: + description: CATag is the tag of the CA image + type: string + enrollerImage: + description: EnrollerImage is the name of the init image for crypto + generation + type: string + enrollerTag: + description: EnrollerTag is the tag of the init image for crypto + generation + type: string + hsmImage: + description: HSMImage is the name of the HSM image + type: string + hsmTag: + description: HSMTag is the tag of the HSM image + type: string + type: object + ingress: + description: Ingress (Optional) is ingress object for ingress overrides + properties: + class: + description: Class (Optional) is the class to set for ingress + type: string + tlsSecretName: + description: TlsSecretName (Optional) is the secret name to be + used for tls certificates + type: string + type: object + license: + description: License should be accepted by the user to be able to + setup CA + properties: + accept: + description: Accept should be set to true to accept the license. 
+ enum: + - true + type: boolean + type: object + numSecondsWarningPeriod: + description: NumSecondsWarningPeriod (Optional - default 30 days) + is used to define certificate expiry warning period. + format: int64 + type: integer + region: + description: Region (Optional) is the region of the nodes where the + CA should be deployed + type: string + registryURL: + description: RegistryURL is registry url used to pull images + type: string + replicas: + description: Replicas (Optional - default 1) is the number of CA replicas + to be setup + format: int32 + type: integer + resources: + description: Resources (Optional) is the amount of resources to be + provided to CA deployment + properties: + ca: + description: CA is the resources provided to the CA container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + enrollJob: + description: EnrollJJob is the resources provided to the enroll + job container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + hsmDaemon: + description: HSMDaemon is the resources provided to the HSM daemon + container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + init: + description: Init is the resources provided to the init container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + service: + description: Service (Optional) is the override object for CA's service + properties: + type: + description: The "type" of the service to be used + type: string + type: object + storage: + description: Storage (Optional - uses default storageclass if not + provided) is the override object for CA's PVC config + properties: + ca: + description: CA is the configuration of the storage of the CA + properties: + class: + description: Class is the storage class + type: string + size: + description: Size of storage + type: string + type: object + type: object + version: + description: FabricVersion (Optional) set the fabric version you want + to use. 
+ type: string + zone: + description: Zone (Optional) is the zone of the nodes where the CA + should be deployed + type: string + required: + - license + - version + type: object + status: + description: Status is the observed state of IBPCA + properties: + errorcode: + description: ErrorCode is the code of classification of errors + type: integer + lastHeartbeatTime: + description: LastHeartbeatTime is when the controller reconciled this + component + type: string + message: + description: Message provides a message for the status to be shown + to customer + type: string + reason: + description: Reason provides a reason for an error + type: string + status: + description: Status is defined based on the current status of the + component + type: string + type: + description: Type is true or false based on if status is valid + type: string + version: + description: Version is the product (IBP) version of the component + type: string + versions: + description: Versions is the operand version of the component + properties: + reconciled: + description: Reconciled provides the reconciled version of the + operand + type: string + required: + - reconciled + type: object + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/bases/ibp.com_ibpconsoles.yaml b/config/crd/bases/ibp.com_ibpconsoles.yaml new file mode 100644 index 00000000..b7d6f927 --- /dev/null +++ b/config/crd/bases/ibp.com_ibpconsoles.yaml @@ -0,0 +1,804 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + creationTimestamp: null + name: ibpconsoles.ibp.com +spec: + group: ibp.com + names: + kind: IBPConsole + listKind: IBPConsoleList + plural: ibpconsoles + singular: ibpconsole + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: The Console is used to deploy and manage the CA, peer, ordering + nodes. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IBPConsoleSpec defines the desired state of IBPConsole + properties: + action: + description: Action (Optional) is action object for trigerring actions + properties: + restart: + type: boolean + type: object + allowDefaultPassword: + description: AllowDefaultPassword, if true, will bypass the password + reset flow on the first connection to the console GUI. By default + (false), all consoles require a password reset at the first login. + type: boolean + arch: + description: Arch (Optional) is the architecture of the nodes where + console should be deployed + items: + type: string + type: array + authScheme: + description: console settings AuthScheme is auth scheme for console + access + type: string + clusterdata: + description: ClusterData is object cluster data information + properties: + namespace: + type: string + type: + description: Type provides the type of cluster + type: string + zones: + description: Zones provides the zones available + items: + type: string + type: array + type: object + components: + description: Components is database name used for components + type: string + configoverride: + description: ConfigOverride (Optional) is the object to provide overrides + properties: + console: + description: Console is the overrides to console configuration + type: object + x-kubernetes-preserve-unknown-fields: true + deployer: + description: Deployer is the overrides to deployer configuration + type: object + x-kubernetes-preserve-unknown-fields: true + maxnamelength: + description: MaxNameLength (Optional) is the maximum length of + the name that the console can have + type: integer + type: object + configtxlator: + description: ConfigtxlatorURL is url for configtxlator server + type: string + connectionString: + description: ConnectionString is connection url for backend database + type: string + crn: + properties: + account_id: + type: string + c_name: + type: string + c_type: + type: string + instance_id: + type: string + location: + type: string + resource_id: + type: string + resource_type: + type: string + service_name: + type: string + version: + type: string + type: object + deployer: + description: Deployer is object for deployer configs + properties: + components_db: + type: string + connectionstring: + type: string + create_db: + type: boolean + domain: + type: string + type: object + deployerTimeout: + description: DeployerTimeout is timeout value for deployer calls + format: int32 + type: integer + deployerUrl: + description: DeployerURL is url for deployer server + type: string + email: + description: Email is the email used for initial access + type: string + featureflags: + description: FeatureFlags is object for feature flag settings + properties: + capabilities_enabled: + type: boolean + create_channel_enabled: + type: boolean + dev_mode: + type: boolean + enable_ou_identifier: + type: boolean + high_availability: + type: boolean + hsm_enabled: + type: boolean + import_only_enabled: + type: boolean + infra_import_options: + properties: + platform: + type: string + supported_cas: + items: + type: string + type: array + supported_orderers: + items: + type: string + type: array + supported_peers: + items: + type: string + type: array + type: object + lifecycle2_0_enabled: + type: boolean + mustgather_enabled: + type: boolean + patch_1_4to2_x_enabled: + type: boolean + read_only_enabled: + 
type: boolean + remote_peer_config_enabled: + type: boolean + saas_enabled: + type: boolean + scale_raft_nodes_enabled: + type: boolean + templates_enabled: + type: boolean + type: object + iamApiKey: + type: string + ibmid: + properties: + client_id: + type: string + client_secret: + type: string + url: + type: string + type: object + imagePullSecrets: + description: ImagePullSecrets (Optional) is the list of ImagePullSecrets + to be used for console's deployment + items: + type: string + type: array + images: + description: Images (Optional) lists the images to be used for console's + deployment + properties: + configtxlatorImage: + description: ConfigtxlatorImage is the name of the configtxlator + image + type: string + configtxlatorTag: + description: ConfigtxlatorTag is the tag of the configtxlator + image + type: string + consoleImage: + description: ConsoleImage is the name of the console image + type: string + consoleInitImage: + description: ConsoleInitImage is the name of the console init + image + type: string + consoleInitTag: + description: ConsoleInitTag is the tag of the console init image + type: string + consoleTag: + description: ConsoleTag is the tag of the console image + type: string + couchdbImage: + description: CouchDBImage is the name of the couchdb image + type: string + couchdbTag: + description: CouchDBTag is the tag of the couchdb image + type: string + deployerImage: + description: DeployerImage is the name of the deployer image + type: string + deployerTag: + description: DeployerTag is the tag of the deployer image + type: string + mustgatherImage: + description: MustgatherImage is the name of the mustgather image + type: string + mustgatherTag: + description: MustgatherTag is the tag of the mustgatherTag image + type: string + type: object + ingress: + description: Ingress (Optional) is ingress object for ingress overrides + properties: + class: + description: Class (Optional) is the class to set for ingress + type: string + tlsSecretName: + description: TlsSecretName (Optional) is the secret name to be + used for tls certificates + type: string + type: object + kubeconfig: + format: byte + type: string + kubeconfignamespace: + type: string + kubeconfigsecretname: + type: string + license: + description: License should be accepted by the user to be able to + setup console + properties: + accept: + description: Accept should be set to true to accept the license. 
+ enum: + - true + type: boolean + type: object + networkinfo: + description: NetworkInfo is object for network overrides + properties: + configtxlatorPort: + description: ConfigtxlatorPort is the port to access configtxlator + format: int32 + type: integer + consolePort: + description: ConsolePort is the port to access the console + format: int32 + type: integer + domain: + description: Domain for the components + type: string + proxyPort: + description: ProxyPort is the port to access console proxy + format: int32 + type: integer + type: object + password: + description: Password is initial password to access console + type: string + passwordSecretName: + description: PasswordSecretName is secretname where password is stored + type: string + proxying: + type: boolean + region: + description: Region (Optional) is the region of the nodes where the + console should be deployed + type: string + registryURL: + description: RegistryURL is registry url used to pull images + type: string + replicas: + description: Replicas (Optional - default 1) is the number of console + replicas to be setup + format: int32 + type: integer + resources: + description: Resources (Optional) is the amount of resources to be + provided to console deployment + properties: + configtxlator: + description: Configtxlator is the resources provided to the configtxlator + container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + console: + description: Console is the resources provided to the console + container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + couchdb: + description: CouchDB is the resources provided to the couchdb + container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + deployer: + description: Deployer is the resources provided to the deployer + container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + init: + description: Init is the resources provided to the init container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + segmentWriteKey: + type: string + service: + description: Service (Optional) is the override object for console's + service + properties: + type: + description: The "type" of the service to be used + type: string + type: object + serviceAccountName: + description: ServiceAccountName defines serviceaccount used for console + deployment + type: string + sessions: + description: Sessions is sessions database name to use + type: string + storage: + description: Storage (Optional - uses default storageclass if not + provided) is the override object for CA's PVC config + properties: + console: + description: Console is the configuration of the storage of the + console + properties: + class: + description: Class is the storage class + type: string + size: + description: Size of storage + type: string + type: object + type: object + system: + description: System is system database name to use + type: string + systemChannel: + description: SystemChannel is default systemchannel name + type: string + tlsSecretName: + description: TLSSecretName is secret name to load custom tls certs + type: string + usetags: + description: UseTags (Optional) is a flag to switch between image + digests and tags + type: boolean + version: + description: Version (Optional) is version for the console + type: string + versions: + properties: + ca: + additionalProperties: + properties: + default: + type: boolean + image: + description: CAImages is the list of images to be used in + CA deployment + properties: + caImage: + description: CAImage is the name of the CA image + type: string + caInitImage: + description: CAInitImage is the name of the Init image + type: string + caInitTag: + description: CAInitTag is the tag of the Init image + type: string + caTag: + description: CATag is the tag of the CA image + type: string + enrollerImage: + description: EnrollerImage is the name of the init image + for crypto generation + type: string + enrollerTag: + description: EnrollerTag is the tag of the init image + for crypto generation + type: string + hsmImage: + description: HSMImage is the name of the HSM image + type: string + hsmTag: + description: HSMTag is the tag of the HSM image + type: string + type: object + version: + type: string + required: + - default + - version + type: object + type: object + orderer: + additionalProperties: + properties: + default: + type: boolean + image: + description: OrdererImages is the list of images to be used + in orderer deployment + properties: + enrollerImage: + description: EnrollerImage is the name of the init image + for crypto generation + type: string + enrollerTag: + description: EnrollerTag is the tag of the init image + for crypto generation + type: string + grpcwebImage: + description: GRPCWebImage is the name of the grpc web + proxy image + type: string + grpcwebTag: + description: GRPCWebTag is the tag of the grpc web proxy + image + type: string + hsmImage: + description: HSMImage is the name of the hsm image + type: string + hsmTag: + description: HSMTag is the tag of the hsm image + type: string + ordererImage: + description: OrdererImage is the name of the orderer + image + type: string + ordererInitImage: + description: OrdererInitImage is the name of the orderer + init image + type: string + ordererInitTag: + description: OrdererInitTag is the tag of the orderer + init image + type: string + ordererTag: + description: OrdererTag is 
the tag of the orderer image + type: string + type: object + version: + type: string + required: + - default + - version + type: object + type: object + peer: + additionalProperties: + properties: + default: + type: boolean + image: + description: PeerImages is the list of images to be used + in peer deployment + properties: + builderImage: + description: BuilderImage is the name of the builder + image + type: string + builderTag: + description: BuilderTag is the tag of the builder image + type: string + chaincodeLauncherImage: + description: CCLauncherImage is the name of the chaincode + launcher image + type: string + chaincodeLauncherTag: + description: CCLauncherTag is the tag of the chaincode + launcher image + type: string + couchdbImage: + description: CouchDBImage is the name of the couchdb + image + type: string + couchdbTag: + description: CouchDBTag is the tag of the couchdb image + type: string + dindImage: + description: DindImage is the name of the dind image + type: string + dindTag: + description: DindTag is the tag of the dind image + type: string + enrollerImage: + description: EnrollerImage is the name of the init image + for crypto generation + type: string + enrollerTag: + description: EnrollerTag is the tag of the init image + for crypto generation + type: string + fileTransferImage: + description: FileTransferImage is the name of the file + transfer image + type: string + fileTransferTag: + description: FileTransferTag is the tag of the file + transfer image + type: string + fluentdImage: + description: FluentdImage is the name of the fluentd + logger image + type: string + fluentdTag: + description: FluentdTag is the tag of the fluentd logger + image + type: string + goEnvImage: + description: GoEnvImage is the name of the goenv image + type: string + goEnvTag: + description: GoEnvTag is the tag of the goenv image + type: string + grpcwebImage: + description: GRPCWebImage is the name of the grpc web + proxy image + type: string + grpcwebTag: + description: GRPCWebTag is the tag of the grpc web proxy + image + type: string + hsmImage: + description: HSMImage is the name of the hsm image + type: string + hsmTag: + description: HSMTag is the tag of the hsm image + type: string + javaEnvImage: + description: JavaEnvImage is the name of the javaenv + image + type: string + javaEnvTag: + description: JavaEnvTag is the tag of the javaenv image + type: string + nodeEnvImage: + description: NodeEnvImage is the name of the nodeenv + image + type: string + nodeEnvTag: + description: NodeEnvTag is the tag of the nodeenv image + type: string + peerImage: + description: PeerImage is the name of the peer image + type: string + peerInitImage: + description: PeerInitImage is the name of the peer init + image + type: string + peerInitTag: + description: PeerInitTag is the tag of the peer init + image + type: string + peerTag: + description: PeerTag is the tag of the peer image + type: string + type: object + version: + type: string + required: + - default + - version + type: object + type: object + required: + - ca + - orderer + - peer + type: object + zone: + description: Zone (Optional) is the zone of the nodes where the console + should be deployed + type: string + required: + - license + - usetags + - version + type: object + status: + description: Status is the observed state of IBPConsole + properties: + errorcode: + description: ErrorCode is the code of classification of errors + type: integer + lastHeartbeatTime: + description: LastHeartbeatTime is when the controller 
reconciled this + component + type: string + message: + description: Message provides a message for the status to be shown + to customer + type: string + reason: + description: Reason provides a reason for an error + type: string + status: + description: Status is defined based on the current status of the + component + type: string + type: + description: Type is true or false based on if status is valid + type: string + version: + description: Version is the product (IBP) version of the component + type: string + versions: + description: Versions is the operand version of the component + properties: + reconciled: + description: Reconciled provides the reconciled version of the + operand + type: string + required: + - reconciled + type: object + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/bases/ibp.com_ibporderers.yaml b/config/crd/bases/ibp.com_ibporderers.yaml new file mode 100644 index 00000000..91adf32c --- /dev/null +++ b/config/crd/bases/ibp.com_ibporderers.yaml @@ -0,0 +1,927 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + creationTimestamp: null + name: ibporderers.ibp.com +spec: + group: ibp.com + names: + kind: IBPOrderer + listKind: IBPOrdererList + plural: ibporderers + singular: ibporderer + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: 'Ordering nodes create the blocks that form the ledger and send + them to peers. Warning: Orderer deployment using this tile is not supported. + Please use the IBP Console to deploy an orderer.' + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IBPOrdererSpec defines the desired state of IBPOrderer + properties: + action: + description: Action (Optional) is object for orderer actions + properties: + enroll: + description: Enroll contains actions for triggering crypto enroll + properties: + ecert: + description: Ecert is used to trigger enroll for ecert + type: boolean + tlscert: + description: TLSCert is used to trigger enroll for tls certs + type: boolean + type: object + reenroll: + description: Reenroll contains actions for triggering crypto reenroll + properties: + ecert: + description: Ecert is used to trigger reenroll for ecert + type: boolean + ecertNewKey: + description: EcertNewKey is used to trigger reenroll for ecert + and also generating a new private key + type: boolean + tlscert: + description: TLSCert is used to trigger reenroll for tlscert + type: boolean + tlscertNewKey: + description: TLSCertNewKey is used to trigger reenroll for + tlscert and also generating a new private key + type: boolean + type: object + restart: + description: Restart action is used to restart orderer deployment + type: boolean + type: object + arch: + description: Arch (Optional) is the architecture of the nodes where + orderer should be deployed + items: + type: string + type: array + clusterSize: + description: ClusterSize (Optional) number of orderers if a cluster + type: integer + clusterconfigoverride: + description: ClusterConfigOverride (Optional) is array of config overrides + for cluster + items: + type: object + type: array + x-kubernetes-preserve-unknown-fields: true + clustersecret: + description: ClusterSecret (Optional) is array of msp crypto for cluster + items: + description: SecretSpec defines the crypto spec to pass to components + properties: + enrollment: + description: Enrollment defines enrollment part of secret spec + properties: + clientauth: + description: ClientAuth contains client uath enrollment + details + properties: + admincerts: + description: AdminCerts is the base64 encoded admincerts + items: + type: string + type: array + cahost: + description: CAHost is host part of the CA to use + type: string + caname: + description: CAName is name of CA + type: string + caport: + description: CAPort is port of the CA to use + type: string + catls: + description: CATLS is tls details to talk to CA endpoint + properties: + cacert: + description: CACert is the base64 encoded certificate + type: string + type: object + csr: + description: CSR is the CSR override object + properties: + hosts: + description: Hosts override for CSR + items: + type: string + type: array + type: object + enrollid: + description: EnrollID is the enrollment username + type: string + enrollsecret: + description: EnrollSecret is enrollment secret ( password + ) + type: string + type: object + component: + description: Component contains ecert enrollment details + properties: + admincerts: + description: AdminCerts is the base64 encoded admincerts + items: + type: string + type: array + cahost: + description: CAHost is host part of the CA to use + type: string + caname: + description: CAName is name of CA + type: string + caport: + description: CAPort is port of the CA to use + type: string + catls: + description: CATLS is tls details to talk to CA endpoint + properties: + cacert: + description: CACert is the base64 encoded certificate + type: string + type: object + csr: + 
description: CSR is the CSR override object + properties: + hosts: + description: Hosts override for CSR + items: + type: string + type: array + type: object + enrollid: + description: EnrollID is the enrollment username + type: string + enrollsecret: + description: EnrollSecret is enrollment secret ( password + ) + type: string + type: object + tls: + description: TLS contains tls enrollment details + properties: + admincerts: + description: AdminCerts is the base64 encoded admincerts + items: + type: string + type: array + cahost: + description: CAHost is host part of the CA to use + type: string + caname: + description: CAName is name of CA + type: string + caport: + description: CAPort is port of the CA to use + type: string + catls: + description: CATLS is tls details to talk to CA endpoint + properties: + cacert: + description: CACert is the base64 encoded certificate + type: string + type: object + csr: + description: CSR is the CSR override object + properties: + hosts: + description: Hosts override for CSR + items: + type: string + type: array + type: object + enrollid: + description: EnrollID is the enrollment username + type: string + enrollsecret: + description: EnrollSecret is enrollment secret ( password + ) + type: string + type: object + type: object + msp: + description: MSP defines msp part of secret spec + properties: + clientauth: + description: ClientAuth contains crypto for client auth + certs + properties: + admincerts: + description: AdminCerts is base64 encoded admincerts + array + items: + type: string + type: array + cacerts: + description: CACerts is base64 encoded cacerts array + items: + type: string + type: array + intermediatecerts: + description: IntermediateCerts is base64 encoded intermediate + certs array + items: + type: string + type: array + keystore: + description: KeyStore is base64 encoded private key + type: string + signcerts: + description: SignCerts is base64 encoded sign cert + type: string + type: object + component: + description: Component contains crypto for ecerts + properties: + admincerts: + description: AdminCerts is base64 encoded admincerts + array + items: + type: string + type: array + cacerts: + description: CACerts is base64 encoded cacerts array + items: + type: string + type: array + intermediatecerts: + description: IntermediateCerts is base64 encoded intermediate + certs array + items: + type: string + type: array + keystore: + description: KeyStore is base64 encoded private key + type: string + signcerts: + description: SignCerts is base64 encoded sign cert + type: string + type: object + tls: + description: TLS contains crypto for tls certs + properties: + admincerts: + description: AdminCerts is base64 encoded admincerts + array + items: + type: string + type: array + cacerts: + description: CACerts is base64 encoded cacerts array + items: + type: string + type: array + intermediatecerts: + description: IntermediateCerts is base64 encoded intermediate + certs array + items: + type: string + type: array + keystore: + description: KeyStore is base64 encoded private key + type: string + signcerts: + description: SignCerts is base64 encoded sign cert + type: string + type: object + type: object + type: object + type: array + configoverride: + description: ConfigOverride (Optional) is the object to provide overrides + to core yaml config + type: object + x-kubernetes-preserve-unknown-fields: true + customNames: + description: CustomNames (Optional) is to use pre-configured resources + for orderer's deployment + properties: + 
pvc: + description: PVC is the list of PVC Names to be used for orderer's + deployment + properties: + orderer: + description: Orderer is the pvc to be used as orderer's storage + type: string + type: object + type: object + disablenodeou: + description: DisableNodeOU (Optional) is used to switch nodeou on + and off + type: boolean + domain: + description: Domain is the sub-domain used for orderer's deployment + type: string + externalAddress: + description: ExternalAddress (Optional) is used internally + type: string + genesisBlock: + description: GenesisBlock (Optional) is genesis block to start the + orderer + type: string + genesisProfile: + type: string + hsm: + description: HSM (Optional) is DEPRECATED + properties: + pkcs11endpoint: + description: PKCS11Endpoint is DEPRECATED + type: string + type: object + imagePullSecrets: + description: ImagePullSecrets (Optional) is the list of ImagePullSecrets + to be used for orderer's deployment + items: + type: string + type: array + images: + description: Images (Optional) lists the images to be used for orderer's + deployment + properties: + enrollerImage: + description: EnrollerImage is the name of the init image for crypto + generation + type: string + enrollerTag: + description: EnrollerTag is the tag of the init image for crypto + generation + type: string + grpcwebImage: + description: GRPCWebImage is the name of the grpc web proxy image + type: string + grpcwebTag: + description: GRPCWebTag is the tag of the grpc web proxy image + type: string + hsmImage: + description: HSMImage is the name of the hsm image + type: string + hsmTag: + description: HSMTag is the tag of the hsm image + type: string + ordererImage: + description: OrdererImage is the name of the orderer image + type: string + ordererInitImage: + description: OrdererInitImage is the name of the orderer init + image + type: string + ordererInitTag: + description: OrdererInitTag is the tag of the orderer init image + type: string + ordererTag: + description: OrdererTag is the tag of the orderer image + type: string + type: object + ingress: + description: Ingress (Optional) is ingress object for ingress overrides + properties: + class: + description: Class (Optional) is the class to set for ingress + type: string + tlsSecretName: + description: TlsSecretName (Optional) is the secret name to be + used for tls certificates + type: string + type: object + isprecreate: + description: IsPrecreate (Optional) defines if orderer is in precreate + state + type: boolean + license: + description: License should be accepted by the user to be able to + setup orderer + properties: + accept: + description: Accept should be set to true to accept the license. + enum: + - true + type: boolean + type: object + location: + description: ClusterLocation (Optional) is array of cluster location + settings for cluster + items: + description: IBPOrdererClusterLocation (Optional) is object of cluster + location settings for cluster + properties: + region: + description: Region (Optional) is the region of the nodes where + the orderer should be deployed + type: string + zone: + description: Zone (Optional) is the zone of the nodes where + the orderer should be deployed + type: string + type: object + type: array + mspID: + description: MSPID is the msp id of the orderer + type: string + numSecondsWarningPeriod: + description: NumSecondsWarningPeriod (Optional - default 30 days) + is used to define certificate expiry warning period. 
+ format: int64 + type: integer + number: + description: NodeNumber (Optional) is the number of this node in cluster + - used internally + type: integer + ordererType: + description: OrdererType is type of orderer you want to start + type: string + orgName: + description: OrgName is the organization name of the orderer + type: string + region: + description: Region (Optional) is the region of the nodes where the + orderer should be deployed + type: string + registryURL: + description: RegistryURL is registry url used to pull images + type: string + replicas: + description: Replicas (Optional - default 1) is the number of orderer + replicas to be setup + format: int32 + type: integer + resources: + description: Resources (Optional) is the amount of resources to be + provided to orderer deployment + properties: + enroller: + description: Enroller (Optional) is the resources provided to + the enroller container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + hsmdaemon: + description: HSMDaemon (Optional) is the resources provided to + the HSM Daemon container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + init: + description: Init (Optional) is the resources provided to the + init container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + orderer: + description: Orderer (Optional) is the resources provided to the + orderer container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + proxy: + description: GRPCProxy (Optional) is the resources provided to + the proxy container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + secret: + description: Secret is object for msp crypto + properties: + enrollment: + description: Enrollment defines enrollment part of secret spec + properties: + clientauth: + description: ClientAuth contains client uath enrollment details + properties: + admincerts: + description: AdminCerts is the base64 encoded admincerts + items: + type: string + type: array + cahost: + description: CAHost is host part of the CA to use + type: string + caname: + description: CAName is name of CA + type: string + caport: + description: CAPort is port of the CA to use + type: string + catls: + description: CATLS is tls details to talk to CA endpoint + properties: + cacert: + description: CACert is the base64 encoded certificate + type: string + type: object + csr: + description: CSR is the CSR override object + properties: + hosts: + description: Hosts override for CSR + items: + type: string + type: array + type: object + enrollid: + description: EnrollID is the enrollment username + type: string + enrollsecret: + description: EnrollSecret is enrollment secret ( password + ) + type: string + type: object + component: + description: Component contains ecert enrollment details + properties: + admincerts: + description: AdminCerts is the base64 encoded admincerts + items: + type: string + type: array + cahost: + description: CAHost is host part of the CA to use + type: string + caname: + description: CAName is name of CA + type: string + caport: + description: CAPort is port of the CA to use + type: string + catls: + description: CATLS is tls details to talk to CA endpoint + properties: + cacert: + description: CACert is the base64 encoded certificate + type: string + type: object + csr: + description: CSR is the CSR override object + properties: + hosts: + description: Hosts override for CSR + items: + type: string + type: array + type: object + enrollid: + description: EnrollID is the enrollment username + type: string + enrollsecret: + description: EnrollSecret is enrollment secret ( password + ) + type: string + type: object + tls: + description: TLS contains tls enrollment details + properties: + admincerts: + description: AdminCerts is the base64 encoded admincerts + items: + type: string + type: array + cahost: + description: CAHost is host part of the CA to use + type: string + caname: + description: CAName is name of CA + type: string + caport: + description: CAPort is port of the CA to use + type: string + catls: + description: CATLS is tls details to talk to CA endpoint + properties: + cacert: + description: CACert is the base64 encoded certificate + type: string + type: object + csr: + description: CSR is the CSR override object + properties: + hosts: + description: Hosts override for CSR + items: + type: string + type: array + type: object + enrollid: + description: EnrollID is the enrollment username + type: string + enrollsecret: + description: EnrollSecret is enrollment secret ( password + ) + type: string + type: object + type: object + msp: + description: MSP defines msp part of secret spec + properties: + clientauth: + description: ClientAuth contains crypto for client auth certs + properties: + admincerts: + description: AdminCerts is base64 encoded admincerts array + items: + type: string + type: array + cacerts: + description: CACerts is base64 encoded cacerts array + items: + type: string + type: array + intermediatecerts: + description: 
IntermediateCerts is base64 encoded intermediate + certs array + items: + type: string + type: array + keystore: + description: KeyStore is base64 encoded private key + type: string + signcerts: + description: SignCerts is base64 encoded sign cert + type: string + type: object + component: + description: Component contains crypto for ecerts + properties: + admincerts: + description: AdminCerts is base64 encoded admincerts array + items: + type: string + type: array + cacerts: + description: CACerts is base64 encoded cacerts array + items: + type: string + type: array + intermediatecerts: + description: IntermediateCerts is base64 encoded intermediate + certs array + items: + type: string + type: array + keystore: + description: KeyStore is base64 encoded private key + type: string + signcerts: + description: SignCerts is base64 encoded sign cert + type: string + type: object + tls: + description: TLS contains crypto for tls certs + properties: + admincerts: + description: AdminCerts is base64 encoded admincerts array + items: + type: string + type: array + cacerts: + description: CACerts is base64 encoded cacerts array + items: + type: string + type: array + intermediatecerts: + description: IntermediateCerts is base64 encoded intermediate + certs array + items: + type: string + type: array + keystore: + description: KeyStore is base64 encoded private key + type: string + signcerts: + description: SignCerts is base64 encoded sign cert + type: string + type: object + type: object + type: object + service: + description: Service (Optional) is the override object for orderer's + service + properties: + type: + description: The "type" of the service to be used + type: string + type: object + storage: + description: Storage (Optional - uses default storageclass if not + provided) is the override object for CA's PVC config + properties: + orderer: + description: Orderer (Optional) is the configuration of the storage + of the orderer + properties: + class: + description: Class is the storage class + type: string + size: + description: Size of storage + type: string + type: object + type: object + systemChannelName: + description: SystemChannelName is the name of systemchannel + type: string + useChannelLess: + type: boolean + version: + description: FabricVersion (Optional) is fabric version for the orderer + type: string + zone: + description: Zone (Optional) is the zone of the nodes where the orderer + should be deployed + type: string + required: + - license + - version + type: object + status: + description: IBPOrdererStatus defines the observed state of IBPOrderer + properties: + errorcode: + description: ErrorCode is the code of classification of errors + type: integer + lastHeartbeatTime: + description: LastHeartbeatTime is when the controller reconciled this + component + type: string + message: + description: Message provides a message for the status to be shown + to customer + type: string + reason: + description: Reason provides a reason for an error + type: string + status: + description: Status is defined based on the current status of the + component + type: string + type: + description: Type is true or false based on if status is valid + type: string + version: + description: Version is the product (IBP) version of the component + type: string + versions: + description: Versions is the operand version of the component + properties: + reconciled: + description: Reconciled provides the reconciled version of the + operand + type: string + required: + - reconciled + type: object + 
type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/bases/ibp.com_ibppeers.yaml b/config/crd/bases/ibp.com_ibppeers.yaml new file mode 100644 index 00000000..81d441d6 --- /dev/null +++ b/config/crd/bases/ibp.com_ibppeers.yaml @@ -0,0 +1,863 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + creationTimestamp: null + name: ibppeers.ibp.com +spec: + group: ibp.com + names: + kind: IBPPeer + listKind: IBPPeerList + plural: ibppeers + singular: ibppeer + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: 'IBPPeer is the Schema for the ibppeers API. Warning: Peer deployment + using this tile is not supported. Please use the IBP Console to deploy a + Peer.' + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IBPPeerSpec defines the desired state of IBPPeer + properties: + action: + description: Action (Optional) is object for peer actions + properties: + enroll: + description: Enroll contains actions for triggering crypto enroll + properties: + ecert: + description: Ecert is used to trigger enroll for ecert + type: boolean + tlscert: + description: TLSCert is used to trigger enroll for tlscert + type: boolean + type: object + reenroll: + description: Reenroll contains actions for triggering crypto reenroll + properties: + ecert: + description: Ecert is used to trigger reenroll for ecert + type: boolean + ecertNewKey: + description: EcertNewKey is used to trigger reenroll for ecert + and also generating a new private key + type: boolean + tlscert: + description: TLSCert is used to trigger reenroll for tlscert + type: boolean + tlscertNewKey: + description: TLSCertNewKey is used to trigger reenroll for + tlscert and also generating a new private key + type: boolean + type: object + restart: + description: Restart action is used to restart peer deployment + type: boolean + upgradedbs: + description: UpgradeDBs action is used to trigger peer node upgrade-dbs + command + type: boolean + type: object + arch: + description: cluster related configs Arch (Optional) is the architecture + of the nodes where peer should be deployed + items: + type: string + type: array + chaincodeBuilderConfig: + additionalProperties: + type: string + description: ChaincodeBuilderConfig (Optional) is a k/v map providing + a scope for template substitutions defined in chaincode-as-a-service + package metadata files. The map will be serialized as JSON and set + in the peer deployment CHAINCODE_AS_A_SERVICE_BUILDER_CONFIG env + variable. 
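# Illustrative sketch only (the key below is a hypothetical template name, not part of this
# schema): a peer spec could set
#   chaincodeBuilderConfig:
#     peername: org1-peer1
# and, per the description above, the operator would serialize the map to JSON and expose it
# to the peer as CHAINCODE_AS_A_SERVICE_BUILDER_CONFIG={"peername":"org1-peer1"}.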
+ type: object + configoverride: + description: ConfigOverride (Optional) is the object to provide overrides + to core yaml config + type: object + x-kubernetes-preserve-unknown-fields: true + customNames: + description: CustomNames (Optional) is to use pre-configured resources + for peer's deployment + properties: + pvc: + description: PVC is the list of PVC Names to be used for peer's + deployment + properties: + peer: + description: Peer is the pvc to be used as peer's storage + type: string + statedb: + description: StateDB is the pvc to be used as statedb's storage + type: string + type: object + type: object + dindArgs: + description: advanced configs DindArgs (Optional) is used to override + args passed to dind container + items: + type: string + type: array + disablenodeou: + description: DisableNodeOU (Optional) is used to switch nodeou on + and off + type: boolean + domain: + description: proxy ip passed if not OCP, domain for OCP Domain is + the sub-domain used for peer's deployment + type: string + hsm: + description: HSM (Optional) is DEPRECATED + properties: + pkcs11endpoint: + description: PKCS11Endpoint is DEPRECATED + type: string + type: object + imagePullSecrets: + description: ImagePullSecrets (Optional) is the list of ImagePullSecrets + to be used for peer's deployment + items: + type: string + type: array + images: + description: Images (Optional) lists the images to be used for peer's + deployment + properties: + builderImage: + description: BuilderImage is the name of the builder image + type: string + builderTag: + description: BuilderTag is the tag of the builder image + type: string + chaincodeLauncherImage: + description: CCLauncherImage is the name of the chaincode launcher + image + type: string + chaincodeLauncherTag: + description: CCLauncherTag is the tag of the chaincode launcher + image + type: string + couchdbImage: + description: CouchDBImage is the name of the couchdb image + type: string + couchdbTag: + description: CouchDBTag is the tag of the couchdb image + type: string + dindImage: + description: DindImage is the name of the dind image + type: string + dindTag: + description: DindTag is the tag of the dind image + type: string + enrollerImage: + description: EnrollerImage is the name of the init image for crypto + generation + type: string + enrollerTag: + description: EnrollerTag is the tag of the init image for crypto + generation + type: string + fileTransferImage: + description: FileTransferImage is the name of the file transfer + image + type: string + fileTransferTag: + description: FileTransferTag is the tag of the file transfer image + type: string + fluentdImage: + description: FluentdImage is the name of the fluentd logger image + type: string + fluentdTag: + description: FluentdTag is the tag of the fluentd logger image + type: string + goEnvImage: + description: GoEnvImage is the name of the goenv image + type: string + goEnvTag: + description: GoEnvTag is the tag of the goenv image + type: string + grpcwebImage: + description: GRPCWebImage is the name of the grpc web proxy image + type: string + grpcwebTag: + description: GRPCWebTag is the tag of the grpc web proxy image + type: string + hsmImage: + description: HSMImage is the name of the hsm image + type: string + hsmTag: + description: HSMTag is the tag of the hsm image + type: string + javaEnvImage: + description: JavaEnvImage is the name of the javaenv image + type: string + javaEnvTag: + description: JavaEnvTag is the tag of the javaenv image + type: string + nodeEnvImage: + 
description: NodeEnvImage is the name of the nodeenv image + type: string + nodeEnvTag: + description: NodeEnvTag is the tag of the nodeenv image + type: string + peerImage: + description: PeerImage is the name of the peer image + type: string + peerInitImage: + description: PeerInitImage is the name of the peer init image + type: string + peerInitTag: + description: PeerInitTag is the tag of the peer init image + type: string + peerTag: + description: PeerTag is the tag of the peer image + type: string + type: object + ingress: + description: Ingress (Optional) is ingress object for ingress overrides + properties: + class: + description: Class (Optional) is the class to set for ingress + type: string + tlsSecretName: + description: TlsSecretName (Optional) is the secret name to be + used for tls certificates + type: string + type: object + license: + description: License should be accepted by the user to be able to + setup Peer + properties: + accept: + description: Accept should be set to true to accept the license. + enum: + - true + type: boolean + type: object + mspID: + description: peer specific configs MSPID is the msp id of the peer + type: string + mspSecret: + description: msp data can be passed in secret on in spec MSPSecret + (Optional) is secret used to store msp crypto + type: string + numSecondsWarningPeriod: + description: NumSecondsWarningPeriod (Optional - default 30 days) + is used to define certificate expiry warning period. + format: int64 + type: integer + peerExternalEndpoint: + description: PeerExternalEndpoint (Optional) is used to override peer + external endpoint + type: string + region: + description: Region (Optional) is the region of the nodes where the + peer should be deployed + type: string + registryURL: + description: RegistryURL is registry url used to pull images + type: string + replicas: + description: Replicas (Optional - default 1) is the number of peer + replicas to be setup + format: int32 + type: integer + resources: + description: Resources (Optional) is the amount of resources to be + provided to peer deployment + properties: + chaincodelauncher: + description: CCLauncher (Optional) is the resources provided to + the cclauncher container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + couchdb: + description: CouchDB (Optional) is the resources provided to the + couchdb container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + dind: + description: DinD (Optional) is the resources provided to the + dind container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + enroller: + description: Enroller (Optional) is the resources provided to + the enroller container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + fluentd: + description: FluentD (Optional) is the resources provided to the + fluentd container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + hsmdaemon: + description: HSMDaemon (Optional) is the resources provided to + the HSM Daemon container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + init: + description: Init (Optional) is the resources provided to the + init container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + peer: + description: Peer (Optional) is the resources provided to the + peer container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + proxy: + description: GRPCProxy (Optional) is the resources provided to + the proxy container + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value.
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + secret: + description: Secret is object for msp crypto + properties: + enrollment: + description: Enrollment defines enrollment part of secret spec + properties: + clientauth: + description: ClientAuth contains client uath enrollment details + properties: + admincerts: + description: AdminCerts is the base64 encoded admincerts + items: + type: string + type: array + cahost: + description: CAHost is host part of the CA to use + type: string + caname: + description: CAName is name of CA + type: string + caport: + description: CAPort is port of the CA to use + type: string + catls: + description: CATLS is tls details to talk to CA endpoint + properties: + cacert: + description: CACert is the base64 encoded certificate + type: string + type: object + csr: + description: CSR is the CSR override object + properties: + hosts: + description: Hosts override for CSR + items: + type: string + type: array + type: object + enrollid: + description: EnrollID is the enrollment username + type: string + enrollsecret: + description: EnrollSecret is enrollment secret ( password + ) + type: string + type: object + component: + description: Component contains ecert enrollment details + properties: + admincerts: + description: AdminCerts is the base64 encoded admincerts + items: + type: string + type: array + cahost: + description: CAHost is host part of the CA to use + type: string + caname: + description: CAName is name of CA + type: string + caport: + description: CAPort is port of the CA to use + type: string + catls: + description: CATLS is tls details to talk to CA endpoint + properties: + cacert: + description: CACert is the base64 encoded certificate + type: string + type: object + csr: + description: CSR is the CSR override object + properties: + hosts: + description: Hosts override for CSR + items: + type: string + type: array + type: object + enrollid: + description: EnrollID is the enrollment username + type: string + enrollsecret: + description: EnrollSecret is enrollment secret ( password + ) + type: string + type: object + tls: + description: TLS contains tls enrollment details + properties: + admincerts: + description: AdminCerts is the base64 encoded admincerts + items: + type: string + type: array + cahost: + description: CAHost is host part of the CA to use + type: string + caname: + description: CAName is name of CA + type: string + caport: + description: CAPort is port of the CA to use + type: string + catls: + description: CATLS is tls details to talk to CA endpoint + properties: + cacert: + description: CACert is the base64 encoded certificate + type: string + type: object + csr: + description: CSR is the CSR override object + properties: + hosts: + description: Hosts override for CSR + items: + type: string + type: array + type: object + enrollid: + description: EnrollID is the enrollment username + type: string + enrollsecret: + description: EnrollSecret is enrollment secret ( password + ) + type: string + type: object + type: object + msp: + description: MSP defines msp part of secret spec + properties: + clientauth: + description: ClientAuth contains crypto for client auth certs + properties: + admincerts: + description: AdminCerts is base64 encoded admincerts array + items: + type: string + type: array + cacerts: + description: CACerts is base64 encoded cacerts array + items: + type: string + type: array + intermediatecerts: + description: 
IntermediateCerts is base64 encoded intermediate + certs array + items: + type: string + type: array + keystore: + description: KeyStore is base64 encoded private key + type: string + signcerts: + description: SignCerts is base64 encoded sign cert + type: string + type: object + component: + description: Component contains crypto for ecerts + properties: + admincerts: + description: AdminCerts is base64 encoded admincerts array + items: + type: string + type: array + cacerts: + description: CACerts is base64 encoded cacerts array + items: + type: string + type: array + intermediatecerts: + description: IntermediateCerts is base64 encoded intermediate + certs array + items: + type: string + type: array + keystore: + description: KeyStore is base64 encoded private key + type: string + signcerts: + description: SignCerts is base64 encoded sign cert + type: string + type: object + tls: + description: TLS contains crypto for tls certs + properties: + admincerts: + description: AdminCerts is base64 encoded admincerts array + items: + type: string + type: array + cacerts: + description: CACerts is base64 encoded cacerts array + items: + type: string + type: array + intermediatecerts: + description: IntermediateCerts is base64 encoded intermediate + certs array + items: + type: string + type: array + keystore: + description: KeyStore is base64 encoded private key + type: string + signcerts: + description: SignCerts is base64 encoded sign cert + type: string + type: object + type: object + type: object + service: + description: Service (Optional) is the override object for peer's + service + properties: + type: + description: The "type" of the service to be used + type: string + type: object + stateDb: + description: StateDb (Optional) is the statedb used for peer, can + be couchdb or leveldb + type: string + storage: + description: Storage (Optional - uses default storageclass if not + provided) is the override object for peer's PVC config + properties: + peer: + description: Peer (Optional) is the configuration of the storage + of the peer + properties: + class: + description: Class is the storage class + type: string + size: + description: Size of storage + type: string + type: object + statedb: + description: StateDB (Optional) is the configuration of the storage + of the statedb + properties: + class: + description: Class is the storage class + type: string + size: + description: Size of storage + type: string + type: object + type: object + version: + description: FabricVersion (Optional) is fabric version for the peer + type: string + zone: + description: Zone (Optional) is the zone of the nodes where the peer + should be deployed + type: string + required: + - license + - version + type: object + status: + description: IBPPeerStatus defines the observed state of IBPPeer + properties: + errorcode: + description: ErrorCode is the code of classification of errors + type: integer + lastHeartbeatTime: + description: LastHeartbeatTime is when the controller reconciled this + component + type: string + message: + description: Message provides a message for the status to be shown + to customer + type: string + reason: + description: Reason provides a reason for an error + type: string + status: + description: Status is defined based on the current status of the + component + type: string + type: + description: Type is true or false based on if status is valid + type: string + version: + description: Version is the product (IBP) version of the component + type: string + versions: + description: 
Versions is the operand version of the component + properties: + reconciled: + description: Reconciled provides the reconciled version of the + operand + type: string + required: + - reconciled + type: object + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml new file mode 100644 index 00000000..1480b2df --- /dev/null +++ b/config/crd/kustomization.yaml @@ -0,0 +1,30 @@ +# This kustomization.yaml is not intended to be run by itself, +# since it depends on service name and namespace that are out of this kustomize package. +# It should be run by config/default +resources: +- bases/ibp.com_ibpcas.yaml +- bases/ibp.com_ibppeers.yaml +- bases/ibp.com_ibporderers.yaml +- bases/ibp.com_ibpconsoles.yaml +# +kubebuilder:scaffold:crdkustomizeresource + +patchesStrategicMerge: +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. +# patches here are for enabling the conversion webhook for each CRD +#- patches/webhook_in_ibpcas.yaml +#- patches/webhook_in_ibppeers.yaml +#- patches/webhook_in_ibporderers.yaml +#- patches/webhook_in_ibpconsoles.yaml +# +kubebuilder:scaffold:crdkustomizewebhookpatch + +# [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. +# patches here are for enabling the CA injection for each CRD +#- patches/cainjection_in_ibpcas.yaml +#- patches/cainjection_in_ibppeers.yaml +#- patches/cainjection_in_ibporderers.yaml +#- patches/cainjection_in_ibpconsoles.yaml +# +kubebuilder:scaffold:crdkustomizecainjectionpatch + +# the following config is for teaching kustomize how to do kustomization for CRDs. +configurations: +- kustomizeconfig.yaml diff --git a/config/crd/kustomizeconfig.yaml b/config/crd/kustomizeconfig.yaml new file mode 100644 index 00000000..6f83d9a9 --- /dev/null +++ b/config/crd/kustomizeconfig.yaml @@ -0,0 +1,17 @@ +# This file is for teaching kustomize how to substitute name and namespace reference in CRD +nameReference: +- kind: Service + version: v1 + fieldSpecs: + - kind: CustomResourceDefinition + group: apiextensions.k8s.io + path: spec/conversion/webhookClientConfig/service/name + +namespace: +- kind: CustomResourceDefinition + group: apiextensions.k8s.io + path: spec/conversion/webhookClientConfig/service/namespace + create: false + +varReference: +- path: metadata/annotations diff --git a/config/crd/patches/cainjection_in_ibpcas.yaml b/config/crd/patches/cainjection_in_ibpcas.yaml new file mode 100644 index 00000000..b9ba9d0c --- /dev/null +++ b/config/crd/patches/cainjection_in_ibpcas.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: ibpcas.ibp.com diff --git a/config/crd/patches/cainjection_in_ibpconsoles.yaml b/config/crd/patches/cainjection_in_ibpconsoles.yaml new file mode 100644 index 00000000..ff17776a --- /dev/null +++ b/config/crd/patches/cainjection_in_ibpconsoles.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. 
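# For illustration (assuming the CERTMANAGER vars in config/default/kustomization.yaml are
# uncommented and the default "operator-" namePrefix and "operator-system" namespace are kept),
# kustomize would resolve the annotation below to something like:
#   cert-manager.io/inject-ca-from: operator-system/operator-serving-cert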
+apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: ibpconsoles.ibp.com diff --git a/config/crd/patches/cainjection_in_ibporderers.yaml b/config/crd/patches/cainjection_in_ibporderers.yaml new file mode 100644 index 00000000..3389a140 --- /dev/null +++ b/config/crd/patches/cainjection_in_ibporderers.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: ibporderers.ibp.com diff --git a/config/crd/patches/cainjection_in_ibppeers.yaml b/config/crd/patches/cainjection_in_ibppeers.yaml new file mode 100644 index 00000000..a49023bc --- /dev/null +++ b/config/crd/patches/cainjection_in_ibppeers.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: ibppeers.ibp.com diff --git a/config/crd/patches/webhook_in_ibpcas.yaml b/config/crd/patches/webhook_in_ibpcas.yaml new file mode 100644 index 00000000..ea100502 --- /dev/null +++ b/config/crd/patches/webhook_in_ibpcas.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ibpcas.ibp.com +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/config/crd/patches/webhook_in_ibpconsoles.yaml b/config/crd/patches/webhook_in_ibpconsoles.yaml new file mode 100644 index 00000000..5e1f04bd --- /dev/null +++ b/config/crd/patches/webhook_in_ibpconsoles.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ibpconsoles.ibp.com +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/config/crd/patches/webhook_in_ibporderers.yaml b/config/crd/patches/webhook_in_ibporderers.yaml new file mode 100644 index 00000000..9053ab54 --- /dev/null +++ b/config/crd/patches/webhook_in_ibporderers.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. 
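# Note: this patch takes effect only when the matching entry is uncommented under
# patchesStrategicMerge in config/crd/kustomization.yaml, for example:
#   patchesStrategicMerge:
#   - patches/webhook_in_ibporderers.yaml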
+apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ibporderers.ibp.com +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/config/crd/patches/webhook_in_ibppeers.yaml b/config/crd/patches/webhook_in_ibppeers.yaml new file mode 100644 index 00000000..199c7cbb --- /dev/null +++ b/config/crd/patches/webhook_in_ibppeers.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ibppeers.ibp.com +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml new file mode 100644 index 00000000..87136bb5 --- /dev/null +++ b/config/default/kustomization.yaml @@ -0,0 +1,66 @@ +# Adds namespace to all resources. +namespace: operator-system + +# Value of this field is prepended to the +# names of all resources, e.g. a deployment named +# "wordpress" becomes "alices-wordpress". +# Note that it should also match with the prefix (text before '-') of the namespace +# field above. +namePrefix: operator- + +bases: + - ../crd + - ../rbac + - ../manager +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml +#- ../webhook +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. +#- ../certmanager +# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. +#- ../prometheus + +patchesStrategicMerge: + # Protect the /metrics endpoint by putting it behind auth. + # If you want your controller-manager to expose the /metrics + # endpoint w/o any authn/z, please comment the following line. + - manager_auth_proxy_patch.yaml + +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml +#- manager_webhook_patch.yaml + +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. +# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. +# 'CERTMANAGER' needs to be enabled to use ca injection +#- webhookcainjection_patch.yaml + +# the following config is for teaching kustomize how to do var substitution +vars: +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. 
+#- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR +# objref: +# kind: Certificate +# group: cert-manager.io +# version: v1alpha2 +# name: serving-cert # this name should match the one in certificate.yaml +# fieldref: +# fieldpath: metadata.namespace +#- name: CERTIFICATE_NAME +# objref: +# kind: Certificate +# group: cert-manager.io +# version: v1alpha2 +# name: serving-cert # this name should match the one in certificate.yaml +#- name: SERVICE_NAMESPACE # namespace of the service +# objref: +# kind: Service +# version: v1 +# name: webhook-service +# fieldref: +# fieldpath: metadata.namespace +#- name: SERVICE_NAME +# objref: +# kind: Service +# version: v1 +# name: webhook-service diff --git a/config/default/manager_auth_proxy_patch.yaml b/config/default/manager_auth_proxy_patch.yaml new file mode 100644 index 00000000..77e743d1 --- /dev/null +++ b/config/default/manager_auth_proxy_patch.yaml @@ -0,0 +1,25 @@ +# This patch inject a sidecar container which is a HTTP proxy for the +# controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: kube-rbac-proxy + image: gcr.io/kubebuilder/kube-rbac-proxy:v0.5.0 + args: + - "--secure-listen-address=0.0.0.0:8443" + - "--upstream=http://127.0.0.1:8080/" + - "--logtostderr=true" + - "--v=10" + ports: + - containerPort: 8443 + name: https + - name: manager + args: + - "--metrics-addr=127.0.0.1:8080" + - "--enable-leader-election" diff --git a/config/default/manager_webhook_patch.yaml b/config/default/manager_webhook_patch.yaml new file mode 100644 index 00000000..738de350 --- /dev/null +++ b/config/default/manager_webhook_patch.yaml @@ -0,0 +1,23 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager + ports: + - containerPort: 9443 + name: webhook-server + protocol: TCP + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: cert + readOnly: true + volumes: + - name: cert + secret: + defaultMode: 420 + secretName: webhook-server-cert diff --git a/config/default/webhookcainjection_patch.yaml b/config/default/webhookcainjection_patch.yaml new file mode 100644 index 00000000..7e79bf99 --- /dev/null +++ b/config/default/webhookcainjection_patch.yaml @@ -0,0 +1,15 @@ +# This patch add annotation to admission webhook config and +# the variables $(CERTIFICATE_NAMESPACE) and $(CERTIFICATE_NAME) will be substituted by kustomize. 
+apiVersion: admissionregistration.k8s.io/v1beta1 +kind: MutatingWebhookConfiguration +metadata: + name: mutating-webhook-configuration + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) +--- +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +metadata: + name: validating-webhook-configuration + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) diff --git a/config/ingress/k3s/ingress-nginx-controller.yaml b/config/ingress/k3s/ingress-nginx-controller.yaml new file mode 100644 index 00000000..f3b03686 --- /dev/null +++ b/config/ingress/k3s/ingress-nginx-controller.yaml @@ -0,0 +1,39 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + namespace: ingress-nginx + name: ingress-nginx-controller +spec: + template: + spec: + containers: + - name: controller + args: + - /nginx-ingress-controller + - --publish-service=$(POD_NAMESPACE)/ingress-nginx-controller + - --election-id=ingress-controller-leader + - --controller-class=k8s.io/ingress-nginx + - --ingress-class=nginx + - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller + - --validating-webhook=:8443 + - --validating-webhook-certificate=/usr/local/certificates/cert + - --validating-webhook-key=/usr/local/certificates/key + - --enable-ssl-passthrough \ No newline at end of file diff --git a/config/ingress/k3s/kustomization.yaml b/config/ingress/k3s/kustomization.yaml new file mode 100644 index 00000000..d3bf86f4 --- /dev/null +++ b/config/ingress/k3s/kustomization.yaml @@ -0,0 +1,25 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - https://github.com/kubernetes/ingress-nginx.git/deploy/static/provider/cloud?ref=controller-v1.1.2 + +patchesStrategicMerge: + - ingress-nginx-controller.yaml diff --git a/config/ingress/kind/ingress-nginx-controller.yaml b/config/ingress/kind/ingress-nginx-controller.yaml new file mode 100644 index 00000000..e178babc --- /dev/null +++ b/config/ingress/kind/ingress-nginx-controller.yaml @@ -0,0 +1,39 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +apiVersion: apps/v1 +kind: Deployment +metadata: + namespace: ingress-nginx + name: ingress-nginx-controller +spec: + template: + spec: + containers: + - name: controller + args: + - /nginx-ingress-controller + - --election-id=ingress-controller-leader + - --controller-class=k8s.io/ingress-nginx + - --ingress-class=nginx + - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller + - --validating-webhook=:8443 + - --validating-webhook-certificate=/usr/local/certificates/cert + - --validating-webhook-key=/usr/local/certificates/key + - --watch-ingress-without-class=true + - --publish-status-address=localhost + - --enable-ssl-passthrough \ No newline at end of file diff --git a/config/ingress/kind/kustomization.yaml b/config/ingress/kind/kustomization.yaml new file mode 100644 index 00000000..3174834c --- /dev/null +++ b/config/ingress/kind/kustomization.yaml @@ -0,0 +1,25 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
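# Usage sketch (assumes a KIND cluster and a kubectl/kustomize version that can resolve the
# remote base referenced below):
#   kubectl apply -k config/ingress/kind
#   kubectl -n ingress-nginx rollout status deployment/ingress-nginx-controller
# The strategic-merge patch adds --enable-ssl-passthrough, typically so the Fabric nodes can
# terminate their own TLS instead of the ingress controller.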
+# +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - https://github.com/kubernetes/ingress-nginx.git/deploy/static/provider/kind?ref=controller-v1.1.2 + +patchesStrategicMerge: + - ingress-nginx-controller.yaml \ No newline at end of file diff --git a/config/ingress/kustomization.yaml b/config/ingress/kustomization.yaml new file mode 100644 index 00000000..e69de29b diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml new file mode 100644 index 00000000..41c818a5 --- /dev/null +++ b/config/manager/kustomization.yaml @@ -0,0 +1,8 @@ +resources: + - manager.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +images: + - name: controller + newName: controller + newTag: latest diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml new file mode 100644 index 00000000..e3339f7e --- /dev/null +++ b/config/manager/manager.yaml @@ -0,0 +1,129 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: v1 +kind: Namespace +metadata: + labels: + control-plane: controller-manager + name: system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system + labels: + control-plane: controller-manager +spec: + replicas: 1 + selector: + matchLabels: + control-plane: controller-manager + name: controller-manager + strategy: + type: Recreate + template: + metadata: + labels: + control-plane: controller-manager + spec: + containers: + - command: + - /manager + args: + - --enable-leader-election + image: controller:latest + name: manager + resources: + limits: + cpu: 100m + memory: 30Mi + requests: + cpu: 100m + memory: 20Mi + - command: + - ibp-operator + env: + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: OPERATOR_NAME + value: operator + - name: CLUSTERTYPE + value: OPENSHIFT + image: todo:update + imagePullPolicy: Always + livenessProbe: + failureThreshold: 5 + initialDelaySeconds: 10 + tcpSocket: + port: 8383 + timeoutSeconds: 5 + name: operator + readinessProbe: + initialDelaySeconds: 10 + periodSeconds: 5 + tcpSocket: + port: 8383 + timeoutSeconds: 5 + resources: + limits: + cpu: 100m + memory: 200Mi + ephemeral-storage: 1Gi + requests: + cpu: 100m + memory: 200Mi + ephemeral-storage: 100Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - CHOWN + - FOWNER + drop: + - ALL + privileged: false + readOnlyRootFilesystem: false + runAsNonRoot: false + runAsUser: 1001 + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + - s390x + hostIPC: false + hostNetwork: false + hostPID: false + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 1001 + serviceAccountName: 
controller-manager + terminationGracePeriodSeconds: 10 diff --git a/config/manifests/bases/fabric-opensource-operator.clusterserviceversion.yaml b/config/manifests/bases/fabric-opensource-operator.clusterserviceversion.yaml new file mode 100644 index 00000000..891a40a9 --- /dev/null +++ b/config/manifests/bases/fabric-opensource-operator.clusterserviceversion.yaml @@ -0,0 +1,1887 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: ClusterServiceVersion +metadata: + annotations: + alm-examples: '[]' + capabilities: Seamless Upgrades + categories: Database + certified: "true" + containerImage: todo:update + createdAt: "2020-07-14T00:00:00Z" + description: TODO + operators.operatorframework.io/builder: operator-sdk-v1.13.0+git + operators.operatorframework.io/internal-objects: '["ibpcas.ibp.com","ibppeers.ibp.com","ibporderers.ibp.com"]' + operators.operatorframework.io/project_layout: go.kubebuilder.io/v3 + repository: "" + name: fabric-opensource-operator.v1.0.0 + namespace: placeholder +spec: + apiservicedefinitions: {} + customresourcedefinitions: + owned: + - description: Certificate Authorities issue certificates for all the identities + to transact on the network. + displayName: Hyperledger Fabric CA + kind: IBPCA + name: ibpcas.ibp.com + resources: + - kind: ConfigMaps + name: "" + version: v1 + - kind: Deployments + name: "" + version: v1 + - kind: IBPCA + name: "" + version: v1beta1 + - kind: IBPConsole + name: "" + version: v1beta1 + - kind: IBPOrderer + name: "" + version: v1beta1 + - kind: IBPPeer + name: "" + version: v1beta1 + - kind: Ingresses + name: "" + version: v1beta1 + - kind: PersistentVolumeClaim + name: "" + version: v1 + - kind: Pods + name: "" + version: v1 + - kind: Replicasets + name: "" + version: v1 + - kind: Role + name: "" + version: v1 + - kind: RoleBinding + name: "" + version: v1 + - kind: Route + name: "" + version: v1 + - kind: Secrets + name: "" + version: v1 + - kind: ServiceAccounts + name: "" + version: v1 + - kind: Services + name: "" + version: v1 + specDescriptors: + - description: Action (Optional) is action object for trigerring actions + displayName: Action + path: action + - description: Renew action is object for certificate renewals + displayName: Renew + path: action.renew + - description: TLSCert action is used to renew TLS crypto for CA server + displayName: TLSCert + path: action.renew.tlscert + - description: Restart action is used to restart the running CA + displayName: Restart + path: action.restart + - description: Arch (Optional) is the architecture of the nodes where CA should + be deployed + displayName: Arch + path: arch + - description: ConfigOverride (Optional) is the object to provide overrides + to CA & TLSCA config + displayName: Config Override + path: configoverride + - description: CA (Optional) is the overrides to CA's configuration + displayName: CA + path: configoverride.ca + - description: MaxNameLength (Optional) is the maximum length of the name that + the CA can have + displayName: Max Name Length + path: configoverride.maxnamelength + - description: TLSCA (Optional) is the overrides to TLSCA's configuration + displayName: TLSCA + path: configoverride.tlsca + - description: CustomNames (Optional) is to use pre-configured resources for + CA's deployment + displayName: Custom Names + path: customNames + - description: PVC is the list of PVC Names to be used for CA's deployment + displayName: PVC + path: customNames.pvc + - description: CA is the pvc to be used as CA's storage + displayName: CA + path: 
customNames.pvc.ca + - description: Sqlite is the sqlite path to be used for CA's deployment + displayName: Sqlite + path: customNames.sqlitepath + - description: Domain is the sub-domain used for CA's deployment + displayName: Domain + path: domain + - description: HSM (Optional) is the paramters for the HSM if being used + displayName: HSM + path: hsm + - description: PKCS11Endpoint is the endpoint for the pkcs11 proxy + displayName: PKCS11 Endpoint + path: hsm.pkcs11endpoint + - description: ImagePullSecrets (Optional) is the list of ImagePullSecrets to + be used for CA's deployment + displayName: Image Pull Secrets + path: imagePullSecrets + - description: Images (Optional) lists the images to be used for CA's deployment + displayName: Images + path: images + - description: CAImage is the name of the CA image + displayName: CAImage + path: images.caImage + - description: CAInitImage is the name of the Init image + displayName: CAInit Image + path: images.caInitImage + - description: CAInitTag is the tag of the Init image + displayName: CAInit Tag + path: images.caInitTag + - description: CATag is the tag of the CA image + displayName: CATag + path: images.caTag + - description: EnrollerImage is the name of the init image for crypto generation + displayName: Enroller Image + path: images.enrollerImage + - description: EnrollerTag is the tag of the init image for crypto generation + displayName: Enroller Tag + path: images.enrollerTag + - description: HSMImage is the name of the HSM image + displayName: HSMImage + path: images.hsmImage + - description: HSMTag is the tag of the HSM image + displayName: HSMTag + path: images.hsmTag + - description: Ingress (Optional) is ingress object for ingress overrides + displayName: Ingress + path: ingress + - description: Class (Optional) is the class to set for ingress + displayName: Class + path: ingress.class + - description: TlsSecretName (Optional) is the secret name to be used for tls + certificates + displayName: Tls Secret Name + path: ingress.tlsSecretName + - description: License should be accepted by the user to be able to setup CA + displayName: License + path: license + - description: Accept should be set to true to accept the license. + displayName: Accept + path: license.accept + value: + - false + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:checkbox + - description: NumSecondsWarningPeriod (Optional - default 30 days) is used + to define certificate expiry warning period. 
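# A minimal IBPCA custom resource sketch, assembled from the spec descriptor
# paths above (license.accept, domain, replicas, version, storage.ca). The
# concrete values (domain, version, storage class, sizes) are illustrative
# placeholders, not defaults shipped by the operator.
apiVersion: ibp.com/v1beta1
kind: IBPCA
metadata:
  name: org1-ca
spec:
  license:
    accept: true          # must be set to true before the CA will be deployed
  domain: example.com     # sub-domain used for the CA's ingress/route
  replicas: 1
  version: 1.5.3          # hypothetical Fabric CA version
  storage:
    ca:
      class: standard     # hypothetical storage class
      size: 1Gi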
+ displayName: Num Seconds Warning Period + path: numSecondsWarningPeriod + - description: Region (Optional) is the region of the nodes where the CA should + be deployed + displayName: Region + path: region + - description: RegistryURL is registry url used to pull images + displayName: Registry URL + path: registryURL + - description: Replicas (Optional - default 1) is the number of CA replicas + to be setup + displayName: Replicas + path: replicas + - description: Resources (Optional) is the amount of resources to be provided + to CA deployment + displayName: Resources + path: resources + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: CA is the resources provided to the CA container + displayName: CA + path: resources.ca + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: EnrollJJob is the resources provided to the enroll job container + displayName: Enroll Job + path: resources.enrollJob + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: HSMDaemon is the resources provided to the HSM daemon container + displayName: HSMDaemon + path: resources.hsmDaemon + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: Init is the resources provided to the init container + displayName: Init + path: resources.init + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: Service (Optional) is the override object for CA's service + displayName: Service + path: service + - description: The "type" of the service to be used + displayName: Type + path: service.type + - description: Storage (Optional - uses default storageclass if not provided) + is the override object for CA's PVC config + displayName: Storage + path: storage + - description: CA is the configuration of the storage of the CA + displayName: CA + path: storage.ca + - description: Class is the storage class + displayName: Class + path: storage.ca.class + - description: Size of storage + displayName: Size + path: storage.ca.size + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podCount + - description: FabricVersion (Optional) set the fabric version you want to use. + displayName: Fabric Version + path: version + - description: Zone (Optional) is the zone of the nodes where the CA should + be deployed + displayName: Zone + path: zone + statusDescriptors: + - description: ErrorCode is the code of classification of errors + displayName: Error Code + path: errorcode + - description: LastHeartbeatTime is when the controller reconciled this component + displayName: Last Heartbeat Time + path: lastHeartbeatTime + - description: Message provides a message for the status to be shown to customer + displayName: Message + path: message + - description: Reason provides a reason for an error + displayName: Reason + path: reason + x-descriptors: + - urn:alm:descriptor:io.kubernetes.phase:reason + - description: Status is defined based on the current status of the component + displayName: Status + path: status + - description: Type is true or false based on if status is valid + displayName: Type + path: type + - description: Version is the product version of the component + displayName: Version + path: version + - description: Versions is the operand version of the component + displayName: Versions + path: versions + version: v1beta1 + - description: The Console is used to deploy and manage the CA, peer, ordering + nodes. 
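# The status descriptors above (and the identical blocks repeated for the other
# CRDs in this file) map onto a status stanza of roughly the following shape.
# The values shown are illustrative assumptions, not output captured from a
# running operator.
status:
  type: Deployed                        # assumed condition type
  status: "True"
  reason: ""
  message: "allPodsRunning"             # hypothetical reconcile message
  lastHeartbeatTime: "2022-01-01T00:00:00Z"
  version: 1.0.0                        # product version of the component
  versions:
    reconciled: 1.5.3-1                 # hypothetical operand version key/value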
+ displayName: Fabric Operations Console + kind: IBPConsole + name: ibpconsoles.ibp.com + resources: + - kind: ConfigMaps + name: "" + version: v1 + - kind: Deployments + name: "" + version: v1 + - kind: IBPCA + name: "" + version: v1beta1 + - kind: IBPConsole + name: "" + version: v1beta1 + - kind: IBPOrderer + name: "" + version: v1beta1 + - kind: IBPPeer + name: "" + version: v1beta1 + - kind: Ingresses + name: "" + version: v1beta1 + - kind: PersistentVolumeClaim + name: "" + version: v1 + - kind: Pods + name: "" + version: v1 + - kind: Replicasets + name: "" + version: v1 + - kind: Role + name: "" + version: v1 + - kind: RoleBinding + name: "" + version: v1 + - kind: Route + name: "" + version: v1 + - kind: Secrets + name: "" + version: v1 + - kind: ServiceAccounts + name: "" + version: v1 + - kind: Services + name: "" + version: v1 + specDescriptors: + - description: Action (Optional) is action object for trigerring actions + displayName: Action + path: action + - description: Arch (Optional) is the architecture of the nodes where console + should be deployed + displayName: Arch + path: arch + - description: console settings AuthScheme is auth scheme for console access + displayName: Auth Scheme + path: authScheme + - description: ClusterData is object cluster data information + displayName: Cluster Data + path: clusterdata + - description: Components is database name used for components + displayName: Components + path: components + - description: ConfigOverride (Optional) is the object to provide overrides + displayName: Config Override + path: configoverride + - description: Console is the overrides to console configuration + displayName: Console + path: configoverride.console + - description: Deployer is the overrides to deployer configuration + displayName: Deployer + path: configoverride.deployer + - description: MaxNameLength (Optional) is the maximum length of the name that + the console can have + displayName: Max Name Length + path: configoverride.maxnamelength + - description: ConfigtxlatorURL is url for configtxlator server + displayName: Configtxlator URL + path: configtxlator + - description: ConnectionString is connection url for backend database + displayName: Connection String + path: connectionString + - description: Deployer is object for deployer configs + displayName: Deployer + path: deployer + - description: DeployerTimeout is timeout value for deployer calls + displayName: Deployer Timeout + path: deployerTimeout + - description: DeployerURL is url for deployer server + displayName: Deployer URL + path: deployerUrl + - description: Email is the email used for initial access + displayName: Email + path: email + - description: FeatureFlags is object for feature flag settings + displayName: Feature Flags + path: featureflags + - description: ImagePullSecrets (Optional) is the list of ImagePullSecrets to + be used for console's deployment + displayName: Image Pull Secrets + path: imagePullSecrets + - description: Images (Optional) lists the images to be used for console's deployment + displayName: Images + path: images + - description: ConfigtxlatorImage is the name of the configtxlator image + displayName: Configtxlator Image + path: images.configtxlatorImage + - description: ConfigtxlatorTag is the tag of the configtxlator image + displayName: Configtxlator Tag + path: images.configtxlatorTag + - description: ConsoleImage is the name of the console image + displayName: Console Image + path: images.consoleImage + - description: ConsoleInitImage is the name of the 
console init image + displayName: Console Init Image + path: images.consoleInitImage + - description: ConsoleInitTag is the tag of the console init image + displayName: Console Init Tag + path: images.consoleInitTag + - description: ConsoleTag is the tag of the console image + displayName: Console Tag + path: images.consoleTag + - description: CouchDBImage is the name of the couchdb image + displayName: Couch DBImage + path: images.couchdbImage + - description: CouchDBTag is the tag of the couchdb image + displayName: Couch DBTag + path: images.couchdbTag + - description: DeployerImage is the name of the deployer image + displayName: Deployer Image + path: images.deployerImage + - description: DeployerTag is the tag of the deployer image + displayName: Deployer Tag + path: images.deployerTag + - description: Ingress (Optional) is ingress object for ingress overrides + displayName: Ingress + path: ingress + - description: Class (Optional) is the class to set for ingress + displayName: Class + path: ingress.class + - description: TlsSecretName (Optional) is the secret name to be used for tls + certificates + displayName: Tls Secret Name + path: ingress.tlsSecretName + - description: License should be accepted by the user to be able to setup console + displayName: License + path: license + - description: Accept should be set to true to accept the license. + displayName: Accept + path: license.accept + value: + - false + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:checkbox + - description: NetworkInfo is object for network overrides + displayName: Network Info + path: networkinfo + - description: ConfigtxlatorPort is the port to access configtxlator + displayName: Configtxlator Port + path: networkinfo.configtxlatorPort + - description: ConsolePort is the port to access the console + displayName: Console Port + path: networkinfo.consolePort + - description: Domain for the components + displayName: Domain + path: networkinfo.domain + - description: ProxyPort is the port to access console proxy + displayName: Proxy Port + path: networkinfo.proxyPort + - description: Password is initial password to access console + displayName: Password + path: password + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:password + - description: PasswordSecretName is secretname where password is stored + displayName: Password Secret Name + path: passwordSecretName + - description: Region (Optional) is the region of the nodes where the console + should be deployed + displayName: Region + path: region + - description: RegistryURL is registry url used to pull images + displayName: Registry URL + path: registryURL + - description: Replicas (Optional - default 1) is the number of console replicas + to be setup + displayName: Replicas + path: replicas + - description: Resources (Optional) is the amount of resources to be provided + to console deployment + displayName: Resources + path: resources + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: Configtxlator is the resources provided to the configtxlator + container + displayName: Configtxlator + path: resources.configtxlator + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: Console is the resources provided to the console container + displayName: Console + path: resources.console + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: CouchDB is the resources provided to the couchdb container + displayName: Couch DB + path: 
resources.couchdb + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: Deployer is the resources provided to the deployer container + displayName: Deployer + path: resources.deployer + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: Init is the resources provided to the init container + displayName: Init + path: resources.init + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: Service (Optional) is the override object for console's service + displayName: Service + path: service + - description: The "type" of the service to be used + displayName: Type + path: service.type + - description: ServiceAccountName defines serviceaccount used for console deployment + displayName: Service Account Name + path: serviceAccountName + - description: Sessions is sessions database name to use + displayName: Sessions + path: sessions + - description: Storage (Optional - uses default storageclass if not provided) + is the override object for CA's PVC config + displayName: Storage + path: storage + - description: Console is the configuration of the storage of the console + displayName: Console + path: storage.console + - description: Class is the storage class + displayName: Class + path: storage.console.class + - description: Size of storage + displayName: Size + path: storage.console.size + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podCount + - description: System is system database name to use + displayName: System + path: system + - description: SystemChannel is default systemchannel name + displayName: System Channel + path: systemChannel + - description: TLSSecretName is secret name to load custom tls certs + displayName: TLSSecret Name + path: tlsSecretName + - description: Version (Optional) is version for the console + displayName: Version + path: version + - description: CAImage is the name of the CA image + displayName: CAImage + path: versions.ca.image.caImage + - description: CAInitImage is the name of the Init image + displayName: CAInit Image + path: versions.ca.image.caInitImage + - description: CAInitTag is the tag of the Init image + displayName: CAInit Tag + path: versions.ca.image.caInitTag + - description: CATag is the tag of the CA image + displayName: CATag + path: versions.ca.image.caTag + - description: EnrollerImage is the name of the init image for crypto generation + displayName: Enroller Image + path: versions.ca.image.enrollerImage + - description: EnrollerTag is the tag of the init image for crypto generation + displayName: Enroller Tag + path: versions.ca.image.enrollerTag + - description: HSMImage is the name of the HSM image + displayName: HSMImage + path: versions.ca.image.hsmImage + - description: HSMTag is the tag of the HSM image + displayName: HSMTag + path: versions.ca.image.hsmTag + - description: EnrollerImage is the name of the init image for crypto generation + displayName: Enroller Image + path: versions.orderer.image.enrollerImage + - description: EnrollerTag is the tag of the init image for crypto generation + displayName: Enroller Tag + path: versions.orderer.image.enrollerTag + - description: GRPCWebImage is the name of the grpc web proxy image + displayName: GRPCWeb Image + path: versions.orderer.image.grpcwebImage + - description: GRPCWebTag is the tag of the grpc web proxy image + displayName: GRPCWeb Tag + path: versions.orderer.image.grpcwebTag + - description: HSMImage is the name of the hsm image + displayName: 
HSMImage + path: versions.orderer.image.hsmImage + - description: HSMTag is the tag of the hsm image + displayName: HSMTag + path: versions.orderer.image.hsmTag + - description: OrdererImage is the name of the orderer image + displayName: Orderer Image + path: versions.orderer.image.ordererImage + - description: OrdererInitImage is the name of the orderer init image + displayName: Orderer Init Image + path: versions.orderer.image.ordererInitImage + - description: OrdererInitTag is the tag of the orderer init image + displayName: Orderer Init Tag + path: versions.orderer.image.ordererInitTag + - description: OrdererTag is the tag of the orderer image + displayName: Orderer Tag + path: versions.orderer.image.ordererTag + - description: BuilderImage is the name of the builder image + displayName: Builder Image + path: versions.peer.image.builderImage + - description: BuilderTag is the tag of the builder image + displayName: Builder Tag + path: versions.peer.image.builderTag + - description: CCLauncherImage is the name of the chaincode launcher image + displayName: CCLauncher Image + path: versions.peer.image.chaincodeLauncherImage + - description: CCLauncherTag is the tag of the chaincode launcher image + displayName: CCLauncher Tag + path: versions.peer.image.chaincodeLauncherTag + - description: CouchDBImage is the name of the couchdb image + displayName: Couch DBImage + path: versions.peer.image.couchdbImage + - description: CouchDBTag is the tag of the couchdb image + displayName: Couch DBTag + path: versions.peer.image.couchdbTag + - description: DindImage is the name of the dind image + displayName: Dind Image + path: versions.peer.image.dindImage + - description: DindTag is the tag of the dind image + displayName: Dind Tag + path: versions.peer.image.dindTag + - description: EnrollerImage is the name of the init image for crypto generation + displayName: Enroller Image + path: versions.peer.image.enrollerImage + - description: EnrollerTag is the tag of the init image for crypto generation + displayName: Enroller Tag + path: versions.peer.image.enrollerTag + - description: FileTransferImage is the name of the file transfer image + displayName: File Transfer Image + path: versions.peer.image.fileTransferImage + - description: FileTransferTag is the tag of the file transfer image + displayName: File Transfer Tag + path: versions.peer.image.fileTransferTag + - description: FluentdImage is the name of the fluentd logger image + displayName: Fluentd Image + path: versions.peer.image.fluentdImage + - description: FluentdTag is the tag of the fluentd logger image + displayName: Fluentd Tag + path: versions.peer.image.fluentdTag + - description: GoEnvImage is the name of the goenv image + displayName: Go Env Image + path: versions.peer.image.goEnvImage + - description: GoEnvTag is the tag of the goenv image + displayName: Go Env Tag + path: versions.peer.image.goEnvTag + - description: GRPCWebImage is the name of the grpc web proxy image + displayName: GRPCWeb Image + path: versions.peer.image.grpcwebImage + - description: GRPCWebTag is the tag of the grpc web proxy image + displayName: GRPCWeb Tag + path: versions.peer.image.grpcwebTag + - description: HSMImage is the name of the hsm image + displayName: HSMImage + path: versions.peer.image.hsmImage + - description: HSMTag is the tag of the hsm image + displayName: HSMTag + path: versions.peer.image.hsmTag + - description: JavaEnvImage is the name of the javaenv image + displayName: Java Env Image + path: versions.peer.image.javaEnvImage + - 
description: JavaEnvTag is the tag of the javaenv image + displayName: Java Env Tag + path: versions.peer.image.javaEnvTag + - description: NodeEnvImage is the name of the nodeenv image + displayName: Node Env Image + path: versions.peer.image.nodeEnvImage + - description: NodeEnvTag is the tag of the nodeenv image + displayName: Node Env Tag + path: versions.peer.image.nodeEnvTag + - description: PeerImage is the name of the peer image + displayName: Peer Image + path: versions.peer.image.peerImage + - description: PeerInitImage is the name of the peer init image + displayName: Peer Init Image + path: versions.peer.image.peerInitImage + - description: PeerInitTag is the tag of the peer init image + displayName: Peer Init Tag + path: versions.peer.image.peerInitTag + - description: PeerTag is the tag of the peer image + displayName: Peer Tag + path: versions.peer.image.peerTag + - description: Zone (Optional) is the zone of the nodes where the console should + be deployed + displayName: Zone + path: zone + statusDescriptors: + - description: ErrorCode is the code of classification of errors + displayName: Error Code + path: errorcode + - description: LastHeartbeatTime is when the controller reconciled this component + displayName: Last Heartbeat Time + path: lastHeartbeatTime + - description: Message provides a message for the status to be shown to customer + displayName: Message + path: message + - description: Reason provides a reason for an error + displayName: Reason + path: reason + x-descriptors: + - urn:alm:descriptor:io.kubernetes.phase:reason + - description: Status is defined based on the current status of the component + displayName: Status + path: status + - description: Type is true or false based on if status is valid + displayName: Type + path: type + - description: Version is the product version of the component + displayName: Version + path: version + - description: Versions is the operand version of the component + displayName: Versions + path: versions + version: v1beta1 + - description: Ordering nodes create the blocks that form the ledger and send + them to peers. 
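# A minimal IBPConsole sketch built from the console spec descriptor paths above
# (license.accept, email, passwordSecretName, networkinfo.domain, version,
# storage.console). All concrete values are placeholders for illustration only.
apiVersion: ibp.com/v1beta1
kind: IBPConsole
metadata:
  name: console
spec:
  license:
    accept: true
  email: admin@example.com        # initial login for the console
  passwordSecretName: console-pw  # hypothetical Secret holding the initial password
  networkinfo:
    domain: example.com           # domain used to build component URLs
  version: 1.0.0
  storage:
    console:
      class: standard             # hypothetical storage class
      size: 5Gi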
+ displayName: Hyperledger Fabric Orderer + kind: IBPOrderer + name: ibporderers.ibp.com + resources: + - kind: ConfigMaps + name: "" + version: v1 + - kind: Deployments + name: "" + version: v1 + - kind: IBPCA + name: "" + version: v1beta1 + - kind: IBPConsole + name: "" + version: v1beta1 + - kind: IBPOrderer + name: "" + version: v1beta1 + - kind: IBPPeer + name: "" + version: v1beta1 + - kind: Ingresses + name: "" + version: v1beta1 + - kind: PersistentVolumeClaim + name: "" + version: v1 + - kind: Pods + name: "" + version: v1 + - kind: Replicasets + name: "" + version: v1 + - kind: Role + name: "" + version: v1 + - kind: RoleBinding + name: "" + version: v1 + - kind: Route + name: "" + version: v1 + - kind: Secrets + name: "" + version: v1 + - kind: ServiceAccounts + name: "" + version: v1 + - kind: Services + name: "" + version: v1 + specDescriptors: + - description: Action (Optional) is object for orderer actions + displayName: Action + path: action + - description: Enroll contains actions for triggering crypto enroll + displayName: Enroll + path: action.enroll + - description: Ecert is used to trigger enroll for ecert + displayName: Ecert + path: action.enroll.ecert + - description: Reenroll contains actions for triggering crypto reenroll + displayName: Reenroll + path: action.reenroll + - description: Ecert is used to trigger reenroll for ecert + displayName: Ecert + path: action.reenroll.ecert + - description: EcertNewKey is used to trigger reenroll for ecert and also generating + a new private key + displayName: Ecert New Key + path: action.reenroll.ecertNewKey + - description: TLSCert is used to trigger reenroll for tlscert + displayName: TLSCert + path: action.reenroll.tlscert + - description: TLSCertNewKey is used to trigger reenroll for tlscert and also + generating a new private key + displayName: TLSCert New Key + path: action.reenroll.tlscertNewKey + - description: Restart action is used to restart orderer deployment + displayName: Restart + path: action.restart + - description: Arch (Optional) is the architecture of the nodes where orderer + should be deployed + displayName: Arch + path: arch + - description: ClusterSize (Optional) number of orderers if a cluster + displayName: Cluster Size + path: clusterSize + - description: ClusterConfigOverride (Optional) is array of config overrides + for cluster + displayName: Cluster Config Override + path: clusterconfigoverride + - description: ClusterSecret (Optional) is array of msp crypto for cluster + displayName: Cluster Secret + path: clustersecret + - description: ConfigOverride (Optional) is the object to provide overrides + to core yaml config + displayName: Config Override + path: configoverride + - description: CustomNames (Optional) is to use pre-configured resources for + orderer's deployment + displayName: Custom Names + path: customNames + - description: PVC is the list of PVC Names to be used for orderer's deployment + displayName: PVC + path: customNames.pvc + - description: Orderer is the pvc to be used as orderer's storage + displayName: Orderer + path: customNames.pvc.orderer + - description: DisableNodeOU (Optional) is used to switch nodeou on and off + displayName: Disable Node OU + path: disablenodeou + - description: Domain is the sub-domain used for orderer's deployment + displayName: Domain + path: domain + - description: ExternalAddress (Optional) is used internally + displayName: External Address + path: externalAddress + - description: GenesisBlock (Optional) is genesis block to start the orderer + 
displayName: Genesis Block + path: genesisBlock + - description: HSM (Optional) is the paramters for the HSM if being used + displayName: HSM + path: hsm + - description: PKCS11Endpoint is the endpoint for the pkcs11 proxy + displayName: PKCS11 Endpoint + path: hsm.pkcs11endpoint + - description: ImagePullSecrets (Optional) is the list of ImagePullSecrets to + be used for orderer's deployment + displayName: Image Pull Secrets + path: imagePullSecrets + - description: Images (Optional) lists the images to be used for orderer's deployment + displayName: Images + path: images + - description: EnrollerImage is the name of the init image for crypto generation + displayName: Enroller Image + path: images.enrollerImage + - description: EnrollerTag is the tag of the init image for crypto generation + displayName: Enroller Tag + path: images.enrollerTag + - description: GRPCWebImage is the name of the grpc web proxy image + displayName: GRPCWeb Image + path: images.grpcwebImage + - description: GRPCWebTag is the tag of the grpc web proxy image + displayName: GRPCWeb Tag + path: images.grpcwebTag + - description: HSMImage is the name of the hsm image + displayName: HSMImage + path: images.hsmImage + - description: HSMTag is the tag of the hsm image + displayName: HSMTag + path: images.hsmTag + - description: OrdererImage is the name of the orderer image + displayName: Orderer Image + path: images.ordererImage + - description: OrdererInitImage is the name of the orderer init image + displayName: Orderer Init Image + path: images.ordererInitImage + - description: OrdererInitTag is the tag of the orderer init image + displayName: Orderer Init Tag + path: images.ordererInitTag + - description: OrdererTag is the tag of the orderer image + displayName: Orderer Tag + path: images.ordererTag + - description: Ingress (Optional) is ingress object for ingress overrides + displayName: Ingress + path: ingress + - description: Class (Optional) is the class to set for ingress + displayName: Class + path: ingress.class + - description: TlsSecretName (Optional) is the secret name to be used for tls + certificates + displayName: Tls Secret Name + path: ingress.tlsSecretName + - description: IsPrecreate (Optional) defines if orderer is in precreate state + displayName: Is Precreate + path: isprecreate + - description: License should be accepted by the user to be able to setup orderer + displayName: License + path: license + - description: Accept should be set to true to accept the license. + displayName: Accept + path: license.accept + value: + - false + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:checkbox + - description: ClusterLocation (Optional) is array of cluster location settings + for cluster + displayName: Cluster Location + path: location + - description: Region (Optional) is the region of the nodes where the orderer + should be deployed + displayName: Region + path: location[0].region + - description: Zone (Optional) is the zone of the nodes where the orderer should + be deployed + displayName: Zone + path: location[0].zone + - description: MSPID is the msp id of the orderer + displayName: MSPID + path: mspID + - description: NumSecondsWarningPeriod (Optional - default 30 days) is used + to define certificate expiry warning period. 
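# A minimal IBPOrderer sketch using descriptor paths from this IBPOrderer entry
# (license.accept, mspID, orgName, clusterSize, domain, version). The MSP ID,
# organization name, cluster size, and Fabric version are illustrative
# assumptions.
apiVersion: ibp.com/v1beta1
kind: IBPOrderer
metadata:
  name: orderernode
spec:
  license:
    accept: true
  mspID: OrdererMSP        # hypothetical MSP ID
  orgName: OrdererOrg      # hypothetical organization name
  clusterSize: 3           # deploy three ordering nodes as one cluster
  domain: example.com
  version: 2.4.3           # hypothetical Fabric version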
+ displayName: Num Seconds Warning Period + path: numSecondsWarningPeriod + - description: NodeNumber (Optional) is the number of this node in cluster - + used internally + displayName: Node Number + path: number + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: OrdererType is type of orderer you want to start + displayName: Orderer Type + path: ordererType + - description: OrgName is the organization name of the orderer + displayName: Org Name + path: orgName + - description: Region (Optional) is the region of the nodes where the orderer + should be deployed + displayName: Region + path: region + - description: RegistryURL is registry url used to pull images + displayName: Registry URL + path: registryURL + - description: Replicas (Optional - default 1) is the number of orderer replicas + to be setup + displayName: Replicas + path: replicas + - description: Resources (Optional) is the amount of resources to be provided + to orderer deployment + displayName: Resources + path: resources + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: Enroller (Optional) is the resources provided to the enroller + container + displayName: Enroller + path: resources.enroller + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: HSMDaemon (Optional) is the resources provided to the HSM Daemon + container + displayName: HSMDaemon + path: resources.hsmdaemon + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: Init (Optional) is the resources provided to the init container + displayName: Init + path: resources.init + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: Orderer (Optional) is the resources provided to the orderer container + displayName: Orderer + path: resources.orderer + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: GRPCProxy (Optional) is the resources provided to the proxy container + displayName: GRPCProxy + path: resources.proxy + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: Secret is object for msp crypto + displayName: Secret + path: secret + - description: Enrollment defines enrollment part of secret spec + displayName: Enrollment + path: secret.enrollment + - description: ClientAuth contains client uath enrollment details + displayName: Client Auth + path: secret.enrollment.clientauth + - description: AdminCerts is the base64 encoded admincerts + displayName: Admin Certs + path: secret.enrollment.clientauth.admincerts + - description: CAHost is host part of the CA to use + displayName: CAHost + path: secret.enrollment.clientauth.cahost + - description: CAName is name of CA + displayName: CAName + path: secret.enrollment.clientauth.caname + - description: CAPort is port of the CA to use + displayName: CAPort + path: secret.enrollment.clientauth.caport + - description: CATLS is tls details to talk to CA endpoint + displayName: CATLS + path: secret.enrollment.clientauth.catls + - description: CACert is the base64 encoded certificate + displayName: CACert + path: secret.enrollment.clientauth.catls.cacert + - description: CSR is the CSR override object + displayName: CSR + path: secret.enrollment.clientauth.csr + - description: Hosts override for CSR + displayName: Hosts + path: secret.enrollment.clientauth.csr.hosts + - description: EnrollID is the enrollment username + displayName: Enroll ID + path: 
secret.enrollment.clientauth.enrollid + - description: EnrollSecret is enrollment secret ( password ) + displayName: Enroll Secret + path: secret.enrollment.clientauth.enrollsecret + - description: Component contains ecert enrollment details + displayName: Component + path: secret.enrollment.component + - description: AdminCerts is the base64 encoded admincerts + displayName: Admin Certs + path: secret.enrollment.component.admincerts + - description: CAHost is host part of the CA to use + displayName: CAHost + path: secret.enrollment.component.cahost + - description: CAName is name of CA + displayName: CAName + path: secret.enrollment.component.caname + - description: CAPort is port of the CA to use + displayName: CAPort + path: secret.enrollment.component.caport + - description: CATLS is tls details to talk to CA endpoint + displayName: CATLS + path: secret.enrollment.component.catls + - description: CACert is the base64 encoded certificate + displayName: CACert + path: secret.enrollment.component.catls.cacert + - description: CSR is the CSR override object + displayName: CSR + path: secret.enrollment.component.csr + - description: Hosts override for CSR + displayName: Hosts + path: secret.enrollment.component.csr.hosts + - description: EnrollID is the enrollment username + displayName: Enroll ID + path: secret.enrollment.component.enrollid + - description: EnrollSecret is enrollment secret ( password ) + displayName: Enroll Secret + path: secret.enrollment.component.enrollsecret + - description: TLS contains tls enrollment details + displayName: TLS + path: secret.enrollment.tls + - description: AdminCerts is the base64 encoded admincerts + displayName: Admin Certs + path: secret.enrollment.tls.admincerts + - description: CAHost is host part of the CA to use + displayName: CAHost + path: secret.enrollment.tls.cahost + - description: CAName is name of CA + displayName: CAName + path: secret.enrollment.tls.caname + - description: CAPort is port of the CA to use + displayName: CAPort + path: secret.enrollment.tls.caport + - description: CATLS is tls details to talk to CA endpoint + displayName: CATLS + path: secret.enrollment.tls.catls + - description: CACert is the base64 encoded certificate + displayName: CACert + path: secret.enrollment.tls.catls.cacert + - description: CSR is the CSR override object + displayName: CSR + path: secret.enrollment.tls.csr + - description: Hosts override for CSR + displayName: Hosts + path: secret.enrollment.tls.csr.hosts + - description: EnrollID is the enrollment username + displayName: Enroll ID + path: secret.enrollment.tls.enrollid + - description: EnrollSecret is enrollment secret ( password ) + displayName: Enroll Secret + path: secret.enrollment.tls.enrollsecret + - description: MSP defines msp part of secret spec + displayName: MSP + path: secret.msp + - description: ClientAuth contains crypto for client auth certs + displayName: Client Auth + path: secret.msp.clientauth + - description: AdminCerts is base64 encoded admincerts array + displayName: Admin Certs + path: secret.msp.clientauth.admincerts + - description: CACerts is base64 encoded cacerts array + displayName: CACerts + path: secret.msp.clientauth.cacerts + - description: IntermediateCerts is base64 encoded intermediate certs array + displayName: Intermediate Certs + path: secret.msp.clientauth.intermediatecerts + - description: KeyStore is base64 encoded private key + displayName: Key Store + path: secret.msp.clientauth.keystore + - description: SignCerts is base64 encoded sign cert + 
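# A sketch of the secret.enrollment stanza described above, showing how component
# (ecert) and TLS enrollment against a CA could be expressed. Hosts, ports, CA
# names, enroll IDs/secrets, and the base64 placeholder are all illustrative
# assumptions.
spec:
  secret:
    enrollment:
      component:
        cahost: org1-ca.example.com   # hypothetical CA host
        caport: "443"
        caname: ca                    # assumed CA instance name
        catls:
          cacert: <base64-encoded CA TLS certificate>
        enrollid: ordereradmin
        enrollsecret: ordereradminpw
      tls:
        cahost: org1-ca.example.com
        caport: "443"
        caname: tlsca                 # assumed TLS CA instance name
        catls:
          cacert: <base64-encoded CA TLS certificate>
        enrollid: ordereradmin
        enrollsecret: ordereradminpw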
displayName: Sign Certs + path: secret.msp.clientauth.signcerts + - description: Component contains crypto for ecerts + displayName: Component + path: secret.msp.component + - description: AdminCerts is base64 encoded admincerts array + displayName: Admin Certs + path: secret.msp.component.admincerts + - description: CACerts is base64 encoded cacerts array + displayName: CACerts + path: secret.msp.component.cacerts + - description: IntermediateCerts is base64 encoded intermediate certs array + displayName: Intermediate Certs + path: secret.msp.component.intermediatecerts + - description: KeyStore is base64 encoded private key + displayName: Key Store + path: secret.msp.component.keystore + - description: SignCerts is base64 encoded sign cert + displayName: Sign Certs + path: secret.msp.component.signcerts + - description: TLS contains crypto for tls certs + displayName: TLS + path: secret.msp.tls + - description: AdminCerts is base64 encoded admincerts array + displayName: Admin Certs + path: secret.msp.tls.admincerts + - description: CACerts is base64 encoded cacerts array + displayName: CACerts + path: secret.msp.tls.cacerts + - description: IntermediateCerts is base64 encoded intermediate certs array + displayName: Intermediate Certs + path: secret.msp.tls.intermediatecerts + - description: KeyStore is base64 encoded private key + displayName: Key Store + path: secret.msp.tls.keystore + - description: SignCerts is base64 encoded sign cert + displayName: Sign Certs + path: secret.msp.tls.signcerts + - description: Service (Optional) is the override object for orderer's service + displayName: Service + path: service + - description: The "type" of the service to be used + displayName: Type + path: service.type + - description: Storage (Optional - uses default storageclass if not provided) + is the override object for CA's PVC config + displayName: Storage + path: storage + - description: Orderer (Optional) is the configuration of the storage of the + orderer + displayName: Orderer + path: storage.orderer + - description: Class is the storage class + displayName: Class + path: storage.orderer.class + - description: Size of storage + displayName: Size + path: storage.orderer.size + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podCount + - description: SystemChannelName is the name of systemchannel + displayName: System Channel Name + path: systemChannelName + - description: FabricVersion (Optional) is fabric version for the orderer + displayName: Fabric Version + path: version + - description: Zone (Optional) is the zone of the nodes where the orderer should + be deployed + displayName: Zone + path: zone + statusDescriptors: + - description: ErrorCode is the code of classification of errors + displayName: Error Code + path: errorcode + - description: LastHeartbeatTime is when the controller reconciled this component + displayName: Last Heartbeat Time + path: lastHeartbeatTime + - description: Message provides a message for the status to be shown to customer + displayName: Message + path: message + - description: Reason provides a reason for an error + displayName: Reason + path: reason + x-descriptors: + - urn:alm:descriptor:io.kubernetes.phase:reason + - description: Status is defined based on the current status of the component + displayName: Status + path: status + - description: Type is true or false based on if status is valid + displayName: Type + path: type + - description: Version is the product version of the component + displayName: Version + path: version + - description: 
Versions is the operand version of the component + displayName: Versions + path: versions + version: v1beta1 + - description: Blockchain Peer is the Schema for the ibppeers API. + displayName: Hyperledger Fabric Peer + kind: IBPPeer + name: ibppeers.ibp.com + resources: + - kind: ConfigMaps + name: "" + version: v1 + - kind: Deployments + name: "" + version: v1 + - kind: IBPCA + name: "" + version: v1beta1 + - kind: IBPConsole + name: "" + version: v1beta1 + - kind: IBPOrderer + name: "" + version: v1beta1 + - kind: IBPPeer + name: "" + version: v1beta1 + - kind: Ingresses + name: "" + version: v1beta1 + - kind: PersistentVolumeClaim + name: "" + version: v1 + - kind: Pods + name: "" + version: v1 + - kind: Replicasets + name: "" + version: v1 + - kind: Role + name: "" + version: v1 + - kind: RoleBinding + name: "" + version: v1 + - kind: Route + name: "" + version: v1 + - kind: Secrets + name: "" + version: v1 + - kind: ServiceAccounts + name: "" + version: v1 + - kind: Services + name: "" + version: v1 + - kind: clusterversions + name: "" + version: v1 + specDescriptors: + - description: Action (Optional) is object for peer actions + displayName: Action + path: action + - description: Enroll contains actions for triggering crypto enroll + displayName: Enroll + path: action.enroll + - description: Ecert is used to trigger enroll for ecert + displayName: Ecert + path: action.enroll.ecert + - description: TLSCert is used to trigger enroll for tlscert + displayName: TLSCert + path: action.enroll.tlscert + - description: Reenroll contains actions for triggering crypto reenroll + displayName: Reenroll + path: action.reenroll + - description: Ecert is used to trigger reenroll for ecert + displayName: Ecert + path: action.reenroll.ecert + - description: EcertNewKey is used to trigger reenroll for ecert and also generating + a new private key + displayName: Ecert New Key + path: action.reenroll.ecertNewKey + - description: TLSCert is used to trigger reenroll for tlscert + displayName: TLSCert + path: action.reenroll.tlscert + - description: TLSCertNewKey is used to trigger reenroll for tlscert and also + generating a new private key + displayName: TLSCert New Key + path: action.reenroll.tlscertNewKey + - description: Restart action is used to restart peer deployment + displayName: Restart + path: action.restart + - description: UpgradeDBs action is used to trigger peer node upgrade-dbs command + displayName: Upgrade DBs + path: action.upgradedbs + - description: cluster related configs Arch (Optional) is the architecture of + the nodes where peer should be deployed + displayName: Arch + path: arch + - description: ConfigOverride (Optional) is the object to provide overrides + to core yaml config + displayName: Config Override + path: configoverride + - description: CustomNames (Optional) is to use pre-configured resources for + peer's deployment + displayName: Custom Names + path: customNames + - description: PVC is the list of PVC Names to be used for peer's deployment + displayName: PVC + path: customNames.pvc + - description: Peer is the pvc to be used as peer's storage + displayName: Peer + path: customNames.pvc.peer + - description: StateDB is the pvc to be used as statedb's storage + displayName: State DB + path: customNames.pvc.statedb + - description: advanced configs DindArgs (Optional) is used to override args + passed to dind container + displayName: Dind Args + path: dindArgs + - description: DisableNodeOU (Optional) is used to switch nodeou on and off + displayName: Disable Node OU + 
path: disablenodeou + - description: proxy ip passed if not OCP, domain for OCP Domain is the sub-domain + used for peer's deployment + displayName: Domain + path: domain + - description: HSM (Optional) is the paramters for the HSM if being used + displayName: HSM + path: hsm + - description: PKCS11Endpoint is the endpoint for the pkcs11 proxy + displayName: PKCS11 Endpoint + path: hsm.pkcs11endpoint + - description: ImagePullSecrets (Optional) is the list of ImagePullSecrets to + be used for peer's deployment + displayName: Image Pull Secrets + path: imagePullSecrets + - description: Images (Optional) lists the images to be used for peer's deployment + displayName: Images + path: images + - description: BuilderImage is the name of the builder image + displayName: Builder Image + path: images.builderImage + - description: BuilderTag is the tag of the builder image + displayName: Builder Tag + path: images.builderTag + - description: CCLauncherImage is the name of the chaincode launcher image + displayName: CCLauncher Image + path: images.chaincodeLauncherImage + - description: CCLauncherTag is the tag of the chaincode launcher image + displayName: CCLauncher Tag + path: images.chaincodeLauncherTag + - description: CouchDBImage is the name of the couchdb image + displayName: Couch DBImage + path: images.couchdbImage + - description: CouchDBTag is the tag of the couchdb image + displayName: Couch DBTag + path: images.couchdbTag + - description: DindImage is the name of the dind image + displayName: Dind Image + path: images.dindImage + - description: DindTag is the tag of the dind image + displayName: Dind Tag + path: images.dindTag + - description: EnrollerImage is the name of the init image for crypto generation + displayName: Enroller Image + path: images.enrollerImage + - description: EnrollerTag is the tag of the init image for crypto generation + displayName: Enroller Tag + path: images.enrollerTag + - description: FileTransferImage is the name of the file transfer image + displayName: File Transfer Image + path: images.fileTransferImage + - description: FileTransferTag is the tag of the file transfer image + displayName: File Transfer Tag + path: images.fileTransferTag + - description: FluentdImage is the name of the fluentd logger image + displayName: Fluentd Image + path: images.fluentdImage + - description: FluentdTag is the tag of the fluentd logger image + displayName: Fluentd Tag + path: images.fluentdTag + - description: GoEnvImage is the name of the goenv image + displayName: Go Env Image + path: images.goEnvImage + - description: GoEnvTag is the tag of the goenv image + displayName: Go Env Tag + path: images.goEnvTag + - description: GRPCWebImage is the name of the grpc web proxy image + displayName: GRPCWeb Image + path: images.grpcwebImage + - description: GRPCWebTag is the tag of the grpc web proxy image + displayName: GRPCWeb Tag + path: images.grpcwebTag + - description: HSMImage is the name of the hsm image + displayName: HSMImage + path: images.hsmImage + - description: HSMTag is the tag of the hsm image + displayName: HSMTag + path: images.hsmTag + - description: JavaEnvImage is the name of the javaenv image + displayName: Java Env Image + path: images.javaEnvImage + - description: JavaEnvTag is the tag of the javaenv image + displayName: Java Env Tag + path: images.javaEnvTag + - description: NodeEnvImage is the name of the nodeenv image + displayName: Node Env Image + path: images.nodeEnvImage + - description: NodeEnvTag is the tag of the nodeenv image + 
displayName: Node Env Tag + path: images.nodeEnvTag + - description: PeerImage is the name of the peer image + displayName: Peer Image + path: images.peerImage + - description: PeerInitImage is the name of the peer init image + displayName: Peer Init Image + path: images.peerInitImage + - description: PeerInitTag is the tag of the peer init image + displayName: Peer Init Tag + path: images.peerInitTag + - description: PeerTag is the tag of the peer image + displayName: Peer Tag + path: images.peerTag + - description: Ingress (Optional) is ingress object for ingress overrides + displayName: Ingress + path: ingress + - description: Class (Optional) is the class to set for ingress + displayName: Class + path: ingress.class + - description: TlsSecretName (Optional) is the secret name to be used for tls + certificates + displayName: Tls Secret Name + path: ingress.tlsSecretName + - description: License should be accepted by the user to be able to setup Peer + displayName: License + path: license + - description: Accept should be set to true to accept the license. + displayName: Accept + path: license.accept + value: + - false + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:checkbox + - description: peer specific configs MSPID is the msp id of the peer + displayName: MSPID + path: mspID + - description: msp data can be passed in secret on in spec MSPSecret (Optional) + is secret used to store msp crypto + displayName: MSPSecret + path: mspSecret + - description: NumSecondsWarningPeriod (Optional - default 30 days) is used + to define certificate expiry warning period. + displayName: Num Seconds Warning Period + path: numSecondsWarningPeriod + - description: PeerExternalEndpoint (Optional) is used to override peer external + endpoint + displayName: Peer External Endpoint + path: peerExternalEndpoint + - description: Region (Optional) is the region of the nodes where the peer should + be deployed + displayName: Region + path: region + - description: RegistryURL is registry url used to pull images + displayName: Registry URL + path: registryURL + - description: Replicas (Optional - default 1) is the number of peer replicas + to be setup + displayName: Replicas + path: replicas + - description: Resources (Optional) is the amount of resources to be provided + to peer deployment + displayName: Resources + path: resources + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: CCLauncher (Optional) is the resources provided to the cclauncher + container + displayName: CCLauncher + path: resources.chaincodelauncher + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: CouchDB (Optional) is the resources provided to the couchdb container + displayName: Couch DB + path: resources.couchdb + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: DinD (Optional) is the resources provided to the dind container + displayName: Din D + path: resources.dind + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: Enroller (Optional) is the resources provided to the enroller + container + displayName: Enroller + path: resources.enroller + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: FluentD (Optional) is the resources provided to the fluentd container + displayName: Fluent D + path: resources.fluentd + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: HSMDaemon 
(Optional) is the resources provided to the HSM Daemon + container + displayName: HSMDaemon + path: resources.hsmdaemon + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: Init (Optional) is the resources provided to the init container + displayName: Init + path: resources.init + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: Peer (Optional) is the resources provided to the peer container + displayName: Peer + path: resources.peer + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: GRPCProxy (Optional) is the resources provided to the proxy container + displayName: GRPCProxy + path: resources.proxy + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: Secret is object for msp crypto + displayName: Secret + path: secret + - description: Enrollment defines enrollment part of secret spec + displayName: Enrollment + path: secret.enrollment + - description: ClientAuth contains client uath enrollment details + displayName: Client Auth + path: secret.enrollment.clientauth + - description: AdminCerts is the base64 encoded admincerts + displayName: Admin Certs + path: secret.enrollment.clientauth.admincerts + - description: CAHost is host part of the CA to use + displayName: CAHost + path: secret.enrollment.clientauth.cahost + - description: CAName is name of CA + displayName: CAName + path: secret.enrollment.clientauth.caname + - description: CAPort is port of the CA to use + displayName: CAPort + path: secret.enrollment.clientauth.caport + - description: CATLS is tls details to talk to CA endpoint + displayName: CATLS + path: secret.enrollment.clientauth.catls + - description: CACert is the base64 encoded certificate + displayName: CACert + path: secret.enrollment.clientauth.catls.cacert + - description: CSR is the CSR override object + displayName: CSR + path: secret.enrollment.clientauth.csr + - description: Hosts override for CSR + displayName: Hosts + path: secret.enrollment.clientauth.csr.hosts + - description: EnrollID is the enrollment username + displayName: Enroll ID + path: secret.enrollment.clientauth.enrollid + - description: EnrollSecret is enrollment secret ( password ) + displayName: Enroll Secret + path: secret.enrollment.clientauth.enrollsecret + - description: Component contains ecert enrollment details + displayName: Component + path: secret.enrollment.component + - description: AdminCerts is the base64 encoded admincerts + displayName: Admin Certs + path: secret.enrollment.component.admincerts + - description: CAHost is host part of the CA to use + displayName: CAHost + path: secret.enrollment.component.cahost + - description: CAName is name of CA + displayName: CAName + path: secret.enrollment.component.caname + - description: CAPort is port of the CA to use + displayName: CAPort + path: secret.enrollment.component.caport + - description: CATLS is tls details to talk to CA endpoint + displayName: CATLS + path: secret.enrollment.component.catls + - description: CACert is the base64 encoded certificate + displayName: CACert + path: secret.enrollment.component.catls.cacert + - description: CSR is the CSR override object + displayName: CSR + path: secret.enrollment.component.csr + - description: Hosts override for CSR + displayName: Hosts + path: secret.enrollment.component.csr.hosts + - description: EnrollID is the enrollment username + displayName: Enroll ID + path: secret.enrollment.component.enrollid + - 
description: EnrollSecret is enrollment secret ( password ) + displayName: Enroll Secret + path: secret.enrollment.component.enrollsecret + - description: TLS contains tls enrollment details + displayName: TLS + path: secret.enrollment.tls + - description: AdminCerts is the base64 encoded admincerts + displayName: Admin Certs + path: secret.enrollment.tls.admincerts + - description: CAHost is host part of the CA to use + displayName: CAHost + path: secret.enrollment.tls.cahost + - description: CAName is name of CA + displayName: CAName + path: secret.enrollment.tls.caname + - description: CAPort is port of the CA to use + displayName: CAPort + path: secret.enrollment.tls.caport + - description: CATLS is tls details to talk to CA endpoint + displayName: CATLS + path: secret.enrollment.tls.catls + - description: CACert is the base64 encoded certificate + displayName: CACert + path: secret.enrollment.tls.catls.cacert + - description: CSR is the CSR override object + displayName: CSR + path: secret.enrollment.tls.csr + - description: Hosts override for CSR + displayName: Hosts + path: secret.enrollment.tls.csr.hosts + - description: EnrollID is the enrollment username + displayName: Enroll ID + path: secret.enrollment.tls.enrollid + - description: EnrollSecret is enrollment secret ( password ) + displayName: Enroll Secret + path: secret.enrollment.tls.enrollsecret + - description: MSP defines msp part of secret spec + displayName: MSP + path: secret.msp + - description: ClientAuth contains crypto for client auth certs + displayName: Client Auth + path: secret.msp.clientauth + - description: AdminCerts is base64 encoded admincerts array + displayName: Admin Certs + path: secret.msp.clientauth.admincerts + - description: CACerts is base64 encoded cacerts array + displayName: CACerts + path: secret.msp.clientauth.cacerts + - description: IntermediateCerts is base64 encoded intermediate certs array + displayName: Intermediate Certs + path: secret.msp.clientauth.intermediatecerts + - description: KeyStore is base64 encoded private key + displayName: Key Store + path: secret.msp.clientauth.keystore + - description: SignCerts is base64 encoded sign cert + displayName: Sign Certs + path: secret.msp.clientauth.signcerts + - description: Component contains crypto for ecerts + displayName: Component + path: secret.msp.component + - description: AdminCerts is base64 encoded admincerts array + displayName: Admin Certs + path: secret.msp.component.admincerts + - description: CACerts is base64 encoded cacerts array + displayName: CACerts + path: secret.msp.component.cacerts + - description: IntermediateCerts is base64 encoded intermediate certs array + displayName: Intermediate Certs + path: secret.msp.component.intermediatecerts + - description: KeyStore is base64 encoded private key + displayName: Key Store + path: secret.msp.component.keystore + - description: SignCerts is base64 encoded sign cert + displayName: Sign Certs + path: secret.msp.component.signcerts + - description: TLS contains crypto for tls certs + displayName: TLS + path: secret.msp.tls + - description: AdminCerts is base64 encoded admincerts array + displayName: Admin Certs + path: secret.msp.tls.admincerts + - description: CACerts is base64 encoded cacerts array + displayName: CACerts + path: secret.msp.tls.cacerts + - description: IntermediateCerts is base64 encoded intermediate certs array + displayName: Intermediate Certs + path: secret.msp.tls.intermediatecerts + - description: KeyStore is base64 encoded private key + displayName: 
Key Store + path: secret.msp.tls.keystore + - description: SignCerts is base64 encoded sign cert + displayName: Sign Certs + path: secret.msp.tls.signcerts + - description: Service (Optional) is the override object for peer's service + displayName: Service + path: service + - description: The "type" of the service to be used + displayName: Type + path: service.type + - description: StateDb (Optional) is the statedb used for peer, can be couchdb + or leveldb + displayName: State Db + path: stateDb + - description: Storage (Optional - uses default storageclass if not provided) + is the override object for peer's PVC config + displayName: Storage + path: storage + - description: Peer (Optional) is the configuration of the storage of the peer + displayName: Peer + path: storage.peer + - description: Class is the storage class + displayName: Class + path: storage.peer.class + - description: Size of storage + displayName: Size + path: storage.peer.size + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podCount + - description: StateDB (Optional) is the configuration of the storage of the + statedb + displayName: State DB + path: storage.statedb + - description: Class is the storage class + displayName: Class + path: storage.statedb.class + - description: Size of storage + displayName: Size + path: storage.statedb.size + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podCount + - description: FabricVersion (Optional) is fabric version for the peer + displayName: Fabric Version + path: version + - description: Zone (Optional) is the zone of the nodes where the peer should + be deployed + displayName: Zone + path: zone + statusDescriptors: + - description: ErrorCode is the code of classification of errors + displayName: Error Code + path: errorcode + - description: LastHeartbeatTime is when the controller reconciled this component + displayName: Last Heartbeat Time + path: lastHeartbeatTime + - description: Message provides a message for the status to be shown to customer + displayName: Message + path: message + - description: Reason provides a reason for an error + displayName: Reason + path: reason + x-descriptors: + - urn:alm:descriptor:io.kubernetes.phase:reason + - description: Status is defined based on the current status of the component + displayName: Status + path: status + - description: Type is true or false based on if status is valid + displayName: Type + path: type + - description: Version is the product version of the component + displayName: Version + path: version + - description: Versions is the operand version of the component + displayName: Versions + path: versions + version: v1beta1 + description: TODO + displayName: Fabric Opensource Operator + icon: + - base64data: PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIzMiIgaGVpZ2h0PSIzMiIgdmlld0JveD0iMCAwIDMyIDMyIj48cGF0aCBkPSJNMTYsMEExNiwxNiwwLDEsMCwzMiwxNiwxNiwxNiwwLDAsMCwxNiwwWk05LDIySDdWMTBIOVptMTMsM0gxMFYyM0gyMlpNMjIsOUgxMFY3SDIyWm0zLDEzSDIzVjEwaDJaIiBzdHlsZT0iZmlsbDojMTMxNzFhIi8+PC9zdmc+Cg== + mediatype: image/svg+xml + install: + spec: + deployments: + - name: operator-controller-manager + spec: + replicas: 1 + selector: + matchLabels: + control-plane: controller-manager + name: controller-manager + strategy: + type: Recreate + template: + metadata: + labels: + control-plane: controller-manager + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + - s390x + 
containers: + - args: + - --secure-listen-address=0.0.0.0:8443 + - --upstream=http://127.0.0.1:8080/ + - --logtostderr=true + - --v=10 + image: gcr.io/kubebuilder/kube-rbac-proxy:v0.5.0 + name: kube-rbac-proxy + ports: + - containerPort: 8443 + name: https + resources: {} + - args: + - --metrics-addr=127.0.0.1:8080 + - --enable-leader-election + command: + - /manager + image: controller:latest + name: manager + resources: + limits: + cpu: 100m + memory: 30Mi + requests: + cpu: 100m + memory: 20Mi + - command: + - ibp-operator + env: + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.annotations['olm.targetNamespaces'] + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: OPERATOR_NAME + value: operator + - name: CLUSTERTYPE + value: OPENSHIFT + image: todo:update + imagePullPolicy: Always + livenessProbe: + failureThreshold: 5 + initialDelaySeconds: 10 + tcpSocket: + port: 8383 + timeoutSeconds: 5 + name: operator + readinessProbe: + initialDelaySeconds: 10 + periodSeconds: 5 + tcpSocket: + port: 8383 + timeoutSeconds: 5 + resources: + limits: + cpu: 100m + ephemeral-storage: 1Gi + memory: 200Mi + requests: + cpu: 100m + ephemeral-storage: 100Mi + memory: 200Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - CHOWN + - FOWNER + drop: + - ALL + privileged: false + readOnlyRootFilesystem: false + runAsNonRoot: false + runAsUser: 1001 + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 1001 + serviceAccountName: operator-controller-manager + terminationGracePeriodSeconds: 10 + strategy: deployment + installModes: + - supported: true + type: OwnNamespace + - supported: false + type: SingleNamespace + - supported: false + type: MultiNamespace + - supported: false + type: AllNamespaces + keywords: + - hyperledger + - fabric + maturity: alpha + provider: + name: Opensource + version: 1.0.0 diff --git a/config/manifests/kustomization.yaml b/config/manifests/kustomization.yaml new file mode 100644 index 00000000..7e0f4d39 --- /dev/null +++ b/config/manifests/kustomization.yaml @@ -0,0 +1,26 @@ +# These resources constitute the fully configured set of manifests +# used to generate the 'manifests/' directory in a bundle. +resources: + - bases/fabric-opensource-operator.clusterserviceversion.yaml + - ../default + - ../samples + - ../scorecard +# [WEBHOOK] To enable webhooks, uncomment all the sections with [WEBHOOK] prefix. +# Do NOT uncomment sections with prefix [CERTMANAGER], as OLM does not support cert-manager. +# These patches remove the unnecessary "cert" volume and its manager container volumeMount. +#patchesJson6902: +#- target: +# group: apps +# version: v1 +# kind: Deployment +# name: controller-manager +# namespace: system +# patch: |- +# # Remove the manager container's "cert" volumeMount, since OLM will create and mount a set of certs. +# # Update the indices in this path if adding or removing containers/volumeMounts in the manager's Deployment. +# - op: remove +# path: /spec/template/spec/containers/1/volumeMounts/0 +# # Remove the "cert" volume, since OLM will create and mount a set of certs. +# # Update the indices in this path if adding or removing volumes in the manager's Deployment. 
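The CSV deployment above runs a kube-rbac-proxy sidecar, a manager container (started with --metrics-addr=127.0.0.1:8080 and --enable-leader-election), and the ibp-operator container, which receives WATCH_NAMESPACE from the olm.targetNamespaces annotation along with POD_NAME, OPERATOR_NAME and CLUSTERTYPE. The operator's real entrypoint is not part of this hunk, so the following is only a minimal sketch of how a controller-runtime based manager might consume WATCH_NAMESPACE to scope its cache; the leader-election ID and logger setup are illustrative assumptions, while the metrics address and leader-election flag mirror the manager container's arguments.

    package main

    import (
        "os"

        ctrl "sigs.k8s.io/controller-runtime"
        "sigs.k8s.io/controller-runtime/pkg/log/zap"
    )

    func main() {
        ctrl.SetLogger(zap.New())

        // OLM fills WATCH_NAMESPACE from metadata.annotations['olm.targetNamespaces'];
        // an empty value is commonly treated as "watch the whole cluster".
        watchNamespace := os.Getenv("WATCH_NAMESPACE")

        mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
            Namespace:          watchNamespace,
            MetricsBindAddress: "127.0.0.1:8080",       // matches --metrics-addr above
            LeaderElection:     true,                    // matches --enable-leader-election
            LeaderElectionID:   "fabric-operator-lock",  // illustrative ID, not from this patch
        })
        if err != nil {
            panic(err)
        }

        if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
            panic(err)
        }
    }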
+# - op: remove +# path: /spec/template/spec/volumes/0 diff --git a/config/prometheus/kustomization.yaml b/config/prometheus/kustomization.yaml new file mode 100644 index 00000000..ed137168 --- /dev/null +++ b/config/prometheus/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- monitor.yaml diff --git a/config/prometheus/monitor.yaml b/config/prometheus/monitor.yaml new file mode 100644 index 00000000..740dddb4 --- /dev/null +++ b/config/prometheus/monitor.yaml @@ -0,0 +1,20 @@ + +# Prometheus Monitor Service (Metrics) +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + control-plane: controller-manager + name: controller-manager-metrics-monitor + namespace: system +spec: + endpoints: + - path: /metrics + port: https + scheme: https + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + tlsConfig: + insecureSkipVerify: true + selector: + matchLabels: + control-plane: controller-manager diff --git a/config/rbac/auth_proxy_client_clusterrole.yaml b/config/rbac/auth_proxy_client_clusterrole.yaml new file mode 100644 index 00000000..54c29664 --- /dev/null +++ b/config/rbac/auth_proxy_client_clusterrole.yaml @@ -0,0 +1,7 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: metrics-reader +rules: + - nonResourceURLs: ["/metrics"] + verbs: ["get"] diff --git a/config/rbac/auth_proxy_role.yaml b/config/rbac/auth_proxy_role.yaml new file mode 100644 index 00000000..618f5e41 --- /dev/null +++ b/config/rbac/auth_proxy_role.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: proxy-role +rules: +- apiGroups: ["authentication.k8s.io"] + resources: + - tokenreviews + verbs: ["create"] +- apiGroups: ["authorization.k8s.io"] + resources: + - subjectaccessreviews + verbs: ["create"] diff --git a/config/rbac/auth_proxy_role_binding.yaml b/config/rbac/auth_proxy_role_binding.yaml new file mode 100644 index 00000000..ec7acc0a --- /dev/null +++ b/config/rbac/auth_proxy_role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: proxy-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: proxy-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/config/rbac/auth_proxy_service.yaml b/config/rbac/auth_proxy_service.yaml new file mode 100644 index 00000000..6cf656be --- /dev/null +++ b/config/rbac/auth_proxy_service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + control-plane: controller-manager + name: controller-manager-metrics-service + namespace: system +spec: + ports: + - name: https + port: 8443 + targetPort: https + selector: + control-plane: controller-manager diff --git a/config/rbac/ibpca_editor_role.yaml b/config/rbac/ibpca_editor_role.yaml new file mode 100644 index 00000000..380d5e8d --- /dev/null +++ b/config/rbac/ibpca_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit ibpcas. 
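The ServiceMonitor above scrapes controller-manager-metrics-service over HTTPS, and the metrics-reader and proxy-role ClusterRoles exist so kube-rbac-proxy can authorize those scrapes through TokenReview and SubjectAccessReview. As a hedged illustration of where such metrics originate, the sketch below registers a custom counter with controller-runtime's shared registry, which is what the manager serves on its metrics endpoint; the metric name, label and helper are invented for this example and are not part of the patch.

    package operatormetrics

    import (
        "github.com/prometheus/client_golang/prometheus"
        "sigs.k8s.io/controller-runtime/pkg/metrics"
    )

    // reconcileErrors is a hypothetical counter; anything registered with
    // metrics.Registry is exposed on the manager's metrics endpoint, which the
    // ServiceMonitor above reaches via the kube-rbac-proxy sidecar on port 8443.
    var reconcileErrors = prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Name: "fabric_operator_reconcile_errors_total", // illustrative name
            Help: "Number of reconcile loops that returned an error.",
        },
        []string{"kind"},
    )

    func init() {
        metrics.Registry.MustRegister(reconcileErrors)
    }

    // RecordError bumps the counter for a given custom resource kind.
    func RecordError(kind string) {
        reconcileErrors.WithLabelValues(kind).Inc()
    }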
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ibpca-editor-role +rules: +- apiGroups: + - ibp.com + resources: + - ibpcas + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - ibp.com + resources: + - ibpcas/status + verbs: + - get diff --git a/config/rbac/ibpca_viewer_role.yaml b/config/rbac/ibpca_viewer_role.yaml new file mode 100644 index 00000000..04d249d4 --- /dev/null +++ b/config/rbac/ibpca_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view ibpcas. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ibpca-viewer-role +rules: +- apiGroups: + - ibp.com + resources: + - ibpcas + verbs: + - get + - list + - watch +- apiGroups: + - ibp.com + resources: + - ibpcas/status + verbs: + - get diff --git a/config/rbac/ibpconsole_editor_role.yaml b/config/rbac/ibpconsole_editor_role.yaml new file mode 100644 index 00000000..a686c800 --- /dev/null +++ b/config/rbac/ibpconsole_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit ibpconsoles. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ibpconsole-editor-role +rules: +- apiGroups: + - ibp.com + resources: + - ibpconsoles + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - ibp.com + resources: + - ibpconsoles/status + verbs: + - get diff --git a/config/rbac/ibpconsole_viewer_role.yaml b/config/rbac/ibpconsole_viewer_role.yaml new file mode 100644 index 00000000..0b6d2849 --- /dev/null +++ b/config/rbac/ibpconsole_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view ibpconsoles. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ibpconsole-viewer-role +rules: +- apiGroups: + - ibp.com + resources: + - ibpconsoles + verbs: + - get + - list + - watch +- apiGroups: + - ibp.com + resources: + - ibpconsoles/status + verbs: + - get diff --git a/config/rbac/ibporderer_editor_role.yaml b/config/rbac/ibporderer_editor_role.yaml new file mode 100644 index 00000000..b23c1b52 --- /dev/null +++ b/config/rbac/ibporderer_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit ibporderers. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ibporderer-editor-role +rules: +- apiGroups: + - ibp.com + resources: + - ibporderers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - ibp.com + resources: + - ibporderers/status + verbs: + - get diff --git a/config/rbac/ibporderer_viewer_role.yaml b/config/rbac/ibporderer_viewer_role.yaml new file mode 100644 index 00000000..fb950834 --- /dev/null +++ b/config/rbac/ibporderer_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view ibporderers. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ibporderer-viewer-role +rules: +- apiGroups: + - ibp.com + resources: + - ibporderers + verbs: + - get + - list + - watch +- apiGroups: + - ibp.com + resources: + - ibporderers/status + verbs: + - get diff --git a/config/rbac/ibppeer_editor_role.yaml b/config/rbac/ibppeer_editor_role.yaml new file mode 100644 index 00000000..2a297e33 --- /dev/null +++ b/config/rbac/ibppeer_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit ibppeers. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ibppeer-editor-role +rules: +- apiGroups: + - ibp.com + resources: + - ibppeers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - ibp.com + resources: + - ibppeers/status + verbs: + - get diff --git a/config/rbac/ibppeer_viewer_role.yaml b/config/rbac/ibppeer_viewer_role.yaml new file mode 100644 index 00000000..32d1995c --- /dev/null +++ b/config/rbac/ibppeer_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view ibppeers. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ibppeer-viewer-role +rules: +- apiGroups: + - ibp.com + resources: + - ibppeers + verbs: + - get + - list + - watch +- apiGroups: + - ibp.com + resources: + - ibppeers/status + verbs: + - get diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml new file mode 100644 index 00000000..f112db00 --- /dev/null +++ b/config/rbac/kustomization.yaml @@ -0,0 +1,13 @@ +resources: +- role.yaml +- role_binding.yaml +- leader_election_role.yaml +- leader_election_role_binding.yaml +# Comment the following 4 lines if you want to disable +# the auth proxy (https://github.com/brancz/kube-rbac-proxy) +# which protects your /metrics endpoint. +- auth_proxy_service.yaml +- auth_proxy_role.yaml +- auth_proxy_role_binding.yaml +- auth_proxy_client_clusterrole.yaml +- service_account.yaml diff --git a/config/rbac/leader_election_role.yaml b/config/rbac/leader_election_role.yaml new file mode 100644 index 00000000..f2e2110e --- /dev/null +++ b/config/rbac/leader_election_role.yaml @@ -0,0 +1,39 @@ +# permissions to do leader election. +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: leader-election-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - configmaps/status + verbs: + - get + - update + - patch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "coordination.k8s.io" + resources: + - leases + verbs: + - get \ No newline at end of file diff --git a/config/rbac/leader_election_role_binding.yaml b/config/rbac/leader_election_role_binding.yaml new file mode 100644 index 00000000..1d1321ed --- /dev/null +++ b/config/rbac/leader_election_role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: leader-election-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: leader-election-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml new file mode 100644 index 00000000..985b27ec --- /dev/null +++ b/config/rbac/role.yaml @@ -0,0 +1,204 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
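The per-CRD editor and viewer ClusterRoles above (ibpcas, ibpconsoles, ibporderers, ibppeers) are generated for end users but ship without any bindings, so granting access is left to the cluster administrator. A hedged sketch of binding one of them with the typed rbac/v1 API follows; the binding name and the fabric-admins group are invented for the example, and in practice a plain kubectl create clusterrolebinding would do the same thing.

    package main

    import (
        "context"

        rbacv1 "k8s.io/api/rbac/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "sigs.k8s.io/controller-runtime/pkg/client/config"
    )

    func main() {
        clientset := kubernetes.NewForConfigOrDie(config.GetConfigOrDie())

        // Bind the generated ibppeer-editor-role to a hypothetical group of
        // Fabric administrators. The subject below is an example only.
        crb := &rbacv1.ClusterRoleBinding{
            ObjectMeta: metav1.ObjectMeta{Name: "fabric-peer-editors"},
            RoleRef: rbacv1.RoleRef{
                APIGroup: "rbac.authorization.k8s.io",
                Kind:     "ClusterRole",
                Name:     "ibppeer-editor-role",
            },
            Subjects: []rbacv1.Subject{
                {Kind: "Group", APIGroup: "rbac.authorization.k8s.io", Name: "fabric-admins"},
            },
        }

        if _, err := clientset.RbacV1().ClusterRoleBindings().Create(context.TODO(), crb, metav1.CreateOptions{}); err != nil {
            panic(err)
        }
    }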
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: manager-role +rules: + - apiGroups: + - apiextensions.k8s.io + resources: + - persistentvolumeclaims + - persistentvolumes + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - apiGroups: + - route.openshift.io + resources: + - routes + - routes/custom-host + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection + - apiGroups: + - "" + resources: + - pods + - pods/log + - persistentvolumeclaims + - persistentvolumes + - services + - endpoints + - events + - configmaps + - secrets + - nodes + - serviceaccounts + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection + - apiGroups: + - "batch" + resources: + - jobs + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection + - apiGroups: + - "authorization.openshift.io" + - "rbac.authorization.k8s.io" + resources: + - roles + - rolebindings + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection + - bind + - escalate + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - apiGroups: + - apps + resources: + - deployments + - daemonsets + - replicasets + - statefulsets + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection + - apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - get + - create + - apiGroups: + - apps + resourceNames: + - operator + resources: + - deployments/finalizers + verbs: + - update + - apiGroups: + - ibp.com + resources: + - ibpcas.ibp.com + - ibppeers.ibp.com + - ibporderers.ibp.com + - ibpconsoles.ibp.com + - ibpcas + - ibppeers + - ibporderers + - ibpconsoles + - ibpcas/finalizers + - ibppeers/finalizers + - ibporderers/finalizers + - ibpconsoles/finalizers + - ibpcas/status + - ibppeers/status + - ibporderers/status + - ibpconsoles/status + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection + - apiGroups: + - extensions + - networking.k8s.io + - config.openshift.io + resources: + - ingresses + - networkpolicies + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch diff --git a/config/rbac/role_binding.yaml b/config/rbac/role_binding.yaml new file mode 100644 index 00000000..63baeea5 --- /dev/null +++ b/config/rbac/role_binding.yaml @@ -0,0 +1,30 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: operator +subjects: + - kind: ServiceAccount + name: operator + namespace: placeholder +roleRef: + kind: ClusterRole + name: operator + apiGroup: rbac.authorization.k8s.io diff --git a/config/rbac/service_account.yaml b/config/rbac/service_account.yaml new file mode 100644 index 00000000..6214cbea --- /dev/null +++ b/config/rbac/service_account.yaml @@ -0,0 +1,24 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: controller-manager +imagePullSecrets: + - name: regcred diff --git a/config/samples/ibp.com_v1beta1_ibpca.yaml b/config/samples/ibp.com_v1beta1_ibpca.yaml new file mode 100644 index 00000000..d5a45509 --- /dev/null +++ b/config/samples/ibp.com_v1beta1_ibpca.yaml @@ -0,0 +1,35 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: ibp.com/v1beta1 +kind: IBPCA +metadata: + name: org1ca + namespace: example +spec: + domain: "" + imagePullSecrets: + - regcred + license: + accept: false + replicas: 1 + storage: + ca: + class: "" + size: 100M + version: 1.4.9 diff --git a/config/samples/ibp.com_v1beta1_ibpconsole.yaml b/config/samples/ibp.com_v1beta1_ibpconsole.yaml new file mode 100644 index 00000000..91962fec --- /dev/null +++ b/config/samples/ibp.com_v1beta1_ibpconsole.yaml @@ -0,0 +1,40 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +apiVersion: ibp.com/v1beta1 +kind: IBPConsole +metadata: + name: ibpconsole + namespace: example +spec: + email: xyz@test.com + password: password + imagePullSecrets: + - regcred + registryURL: "" + license: + accept: false + networkinfo: + domain: "" + storage: + console: + class: "" + size: 5Gi + serviceAccountName: console + version: 1.0.0 + usetags: false diff --git a/config/samples/ibp.com_v1beta1_ibporderer.yaml b/config/samples/ibp.com_v1beta1_ibporderer.yaml new file mode 100644 index 00000000..70b73822 --- /dev/null +++ b/config/samples/ibp.com_v1beta1_ibporderer.yaml @@ -0,0 +1,53 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: ibp.com/v1beta1 +kind: IBPOrderer +metadata: + name: orderera + namespace: example +spec: + clusterSize: 5 + clustersecret: + - enrollment: {} + - enrollment: {} + - enrollment: {} + - enrollment: {} + - enrollment: {} + customNames: + pvc: {} + domain: "" + imagePullSecrets: + - regcred + license: + accept: false + location: + - {} + - {} + - {} + - {} + - {} + mspID: ordererorg + ordererType: etcdraft + orgName: ordererorg + storage: + orderer: + class: "" + size: 5G + systemChannelName: testchainid + version: 2.2.4 diff --git a/config/samples/ibp.com_v1beta1_ibppeer.yaml b/config/samples/ibp.com_v1beta1_ibppeer.yaml new file mode 100644 index 00000000..d94635ea --- /dev/null +++ b/config/samples/ibp.com_v1beta1_ibppeer.yaml @@ -0,0 +1,57 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +apiVersion: ibp.com/v1beta1 +kind: IBPPeer +metadata: + name: org1peer1 + namespace: example +spec: + domain: "" + imagePullSecrets: + - regcred + license: + accept: false + mspID: org1 + secret: + enrollment: + component: + cahost: "" + caname: "" + caport: "" + catls: + cacert: "" + enrollid: "" + enrollsecret: "" + tls: + cahost: "" + caname: "" + caport: "" + catls: + cacert: "" + enrollid: "" + enrollsecret: "" + stateDb: couchdb + storage: + peer: + class: "" + size: 5G + statedb: + class: "" + size: 5G + version: 2.2.4 diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml new file mode 100644 index 00000000..e69de29b diff --git a/config/scorecard/.osdk-scorecard.yaml b/config/scorecard/.osdk-scorecard.yaml new file mode 100644 index 00000000..ae5213c9 --- /dev/null +++ b/config/scorecard/.osdk-scorecard.yaml @@ -0,0 +1,17 @@ +scorecard: + output: json + bundle: bundle/manifests + plugins: + - basic: + cr-manifest: + - "config/samples/ibp.com_v1beta1_ibpca.yaml" + - "config/samples/ibp.com_v1beta1_ibpconsole.yaml" + - "config/samples/ibp.com_v1beta1_ibppeer.yaml" + - "config/samples/ibp.com_v1beta1_ibporderer.yaml" + - olm: + cr-manifest: + - "config/samples/ibp.com_v1beta1_ibpca.yaml" + - "config/samples/ibp.com_v1beta1_ibpconsole.yaml" + - "config/samples/ibp.com_v1beta1_ibppeer.yaml" + - "config/samples/ibp.com_v1beta1_ibporderer.yaml" + csv-path: "config/manifests/bases/fabric-opensource-operator.clusterserviceversion.yaml" diff --git a/config/scorecard/kustomization.yaml b/config/scorecard/kustomization.yaml new file mode 100644 index 00000000..e69de29b diff --git a/config/webhook/kustomization.yaml b/config/webhook/kustomization.yaml new file mode 100644 index 00000000..9cf26134 --- /dev/null +++ b/config/webhook/kustomization.yaml @@ -0,0 +1,6 @@ +resources: +- manifests.yaml +- service.yaml + +configurations: +- kustomizeconfig.yaml diff --git a/config/webhook/kustomizeconfig.yaml b/config/webhook/kustomizeconfig.yaml new file mode 100644 index 00000000..25e21e3c --- /dev/null +++ b/config/webhook/kustomizeconfig.yaml @@ -0,0 +1,25 @@ +# the following config is for teaching kustomize where to look at when substituting vars. +# It requires kustomize v2.1.0 or newer to work properly. 
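The sample CRs above (IBPCA, IBPConsole, IBPOrderer, IBPPeer) are intended to be edited and applied with kubectl, but the same objects can be created programmatically. The sketch below mirrors the ibppeer sample using an unstructured object so it does not depend on the api/v1beta1 Go types, which sit outside this part of the patch; any value not copied directly from the sample is an assumption for illustration only.

    package main

    import (
        "context"

        "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
        "k8s.io/apimachinery/pkg/runtime/schema"
        ctrl "sigs.k8s.io/controller-runtime"
        "sigs.k8s.io/controller-runtime/pkg/client"
    )

    func main() {
        c, err := client.New(ctrl.GetConfigOrDie(), client.Options{})
        if err != nil {
            panic(err)
        }

        // Unstructured avoids importing the api/v1beta1 package; the fields mirror
        // config/samples/ibp.com_v1beta1_ibppeer.yaml above.
        peer := &unstructured.Unstructured{}
        peer.SetGroupVersionKind(schema.GroupVersionKind{Group: "ibp.com", Version: "v1beta1", Kind: "IBPPeer"})
        peer.SetName("org1peer1")
        peer.SetNamespace("example")
        peer.Object["spec"] = map[string]interface{}{
            // The sample ships license.accept: false; set true only after
            // actually reviewing and accepting the license.
            "license":          map[string]interface{}{"accept": true},
            "mspID":            "org1",
            "stateDb":          "couchdb",
            "imagePullSecrets": []interface{}{"regcred"},
            "storage": map[string]interface{}{
                "peer":    map[string]interface{}{"size": "5G"},
                "statedb": map[string]interface{}{"size": "5G"},
            },
            "version": "2.2.4",
        }

        if err := c.Create(context.TODO(), peer); err != nil {
            panic(err)
        }
    }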
+nameReference: +- kind: Service + version: v1 + fieldSpecs: + - kind: MutatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/name + - kind: ValidatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/name + +namespace: +- kind: MutatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/namespace + create: true +- kind: ValidatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/namespace + create: true + +varReference: +- path: metadata/annotations diff --git a/config/webhook/service.yaml b/config/webhook/service.yaml new file mode 100644 index 00000000..31e0f829 --- /dev/null +++ b/config/webhook/service.yaml @@ -0,0 +1,12 @@ + +apiVersion: v1 +kind: Service +metadata: + name: webhook-service + namespace: system +spec: + ports: + - port: 443 + targetPort: 9443 + selector: + control-plane: controller-manager diff --git a/controllers/add_ibpca.go b/controllers/add_ibpca.go new file mode 100644 index 00000000..0e86efd6 --- /dev/null +++ b/controllers/add_ibpca.go @@ -0,0 +1,28 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package controllers + +import ( + "github.com/IBM-Blockchain/fabric-operator/controllers/ibpca" +) + +func init() { + // AddToManagerFuncs is a list of functions to create controllers and add them to a manager. + AddToManagerFuncs = append(AddToManagerFuncs, ibpca.Add) +} diff --git a/controllers/add_ibpconsole.go b/controllers/add_ibpconsole.go new file mode 100644 index 00000000..2d36e10b --- /dev/null +++ b/controllers/add_ibpconsole.go @@ -0,0 +1,28 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package controllers + +import ( + ibpconsole "github.com/IBM-Blockchain/fabric-operator/controllers/ibpconsole" +) + +func init() { + // AddToManagerFuncs is a list of functions to create controllers and add them to a manager. 
+ AddToManagerFuncs = append(AddToManagerFuncs, ibpconsole.Add) +} diff --git a/controllers/add_ibporderer.go b/controllers/add_ibporderer.go new file mode 100644 index 00000000..aa99a4b2 --- /dev/null +++ b/controllers/add_ibporderer.go @@ -0,0 +1,28 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package controllers + +import ( + "github.com/IBM-Blockchain/fabric-operator/controllers/ibporderer" +) + +func init() { + // AddToManagerFuncs is a list of functions to create controllers and add them to a manager. + AddToManagerFuncs = append(AddToManagerFuncs, ibporderer.Add) +} diff --git a/controllers/add_ibppeer.go b/controllers/add_ibppeer.go new file mode 100644 index 00000000..5b862ed6 --- /dev/null +++ b/controllers/add_ibppeer.go @@ -0,0 +1,28 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package controllers + +import ( + "github.com/IBM-Blockchain/fabric-operator/controllers/ibppeer" +) + +func init() { + // AddToManagerFuncs is a list of functions to create controllers and add them to a manager. + AddToManagerFuncs = append(AddToManagerFuncs, ibppeer.Add) +} diff --git a/controllers/common/common.go b/controllers/common/common.go new file mode 100644 index 00000000..e7b366d7 --- /dev/null +++ b/controllers/common/common.go @@ -0,0 +1,145 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package common + +import ( + "context" + "fmt" + "reflect" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + IBPCA = "IBPCA" + IBPPEER = "IBPPeer" + IBPORDERER = "IBPOrderer" + IBPCONSOLE = "IBPConsole" +) + +type Client interface { + List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error +} + +// 1. Only one existing instance (of the same type as 'instance') should have +// the name 'instance.GetName()'; if more than one is present, return error +// 2. If any instance of a different type shares the same name, return error +func ValidateCRName(k8sclient Client, name, namespace, kind string) error { + listOptions := &client.ListOptions{ + Namespace: namespace, + } + + count := 0 + + caList := &current.IBPCAList{} + err := k8sclient.List(context.TODO(), caList, listOptions) + if err != nil { + return err + } + for _, ca := range caList.Items { + if name == ca.Name { + if kind == IBPCA { + count++ + } else { + return fmt.Errorf("custom resource with name '%s' already exists", name) + } + } + } + + ordererList := &current.IBPOrdererList{} + err = k8sclient.List(context.TODO(), ordererList, listOptions) + if err != nil { + return err + } + for _, o := range ordererList.Items { + if name == o.Name { + if kind == IBPORDERER { + count++ + } else { + return fmt.Errorf("custom resource with name %s already exists", name) + } + } + } + + peerList := &current.IBPPeerList{} + err = k8sclient.List(context.TODO(), peerList, listOptions) + if err != nil { + return err + } + for _, p := range peerList.Items { + if name == p.Name { + if kind == IBPPEER { + count++ + } else { + return fmt.Errorf("custom resource with name %s already exists", name) + } + } + } + + consoleList := &current.IBPConsoleList{} + err = k8sclient.List(context.TODO(), consoleList, listOptions) + if err != nil { + return err + } + for _, c := range consoleList.Items { + if name == c.Name { + if kind == IBPCONSOLE { + count++ + } else { + return fmt.Errorf("custom resource with name %s already exists", name) + } + } + } + + if count > 1 { + return fmt.Errorf("custom resource with name %s already exists", name) + } + + return nil +} + +func MSPInfoUpdateDetected(oldSecret, newSecret *current.SecretSpec) bool { + if newSecret == nil || newSecret.MSP == nil { + return false + } + + if oldSecret == nil || oldSecret.MSP == nil { + if newSecret.MSP.Component != nil || newSecret.MSP.TLS != nil || newSecret.MSP.ClientAuth != nil { + return true + } + } else { + // For comparison purpose ignoring admin certs - admin cert updates + // detected in Initialize() code + if oldSecret.MSP.Component != nil && newSecret.MSP.Component != nil { + oldSecret.MSP.Component.AdminCerts = newSecret.MSP.Component.AdminCerts + } + if oldSecret.MSP.TLS != nil && newSecret.MSP.TLS != nil { + oldSecret.MSP.TLS.AdminCerts = newSecret.MSP.TLS.AdminCerts + } + if oldSecret.MSP.ClientAuth != nil && newSecret.MSP.ClientAuth != nil { + oldSecret.MSP.ClientAuth.AdminCerts = newSecret.MSP.ClientAuth.AdminCerts + } + + return !reflect.DeepEqual(oldSecret.MSP, newSecret.MSP) + } + + return false +} diff --git a/controllers/controller.go b/controllers/controller.go new file mode 100644 index 00000000..6983c7ba --- /dev/null +++ b/controllers/controller.go @@ -0,0 +1,37 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this
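controllers/common above provides ValidateCRName, which rejects a new custom resource whose name collides with any existing IBPCA, IBPPeer, IBPOrderer or IBPConsole in the namespace, and MSPInfoUpdateDetected, which flags MSP crypto changes while deliberately ignoring admin cert rotation. A hedged usage sketch follows; the preCreateChecks wrapper is invented for illustration, and Spec.Secret as the *SecretSpec field on IBPPeer is an assumption taken from the CRD paths rather than from Go sources shown in this excerpt.

    package example

    import (
        current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1"
        commoncontroller "github.com/IBM-Blockchain/fabric-operator/controllers/common"
        "sigs.k8s.io/controller-runtime/pkg/client"
    )

    // preCreateChecks is an illustrative pre-reconcile hook: it rejects a new
    // IBPPeer whose name collides with any existing IBP* resource in the
    // namespace, then reports whether its MSP crypto changed since the old spec.
    func preCreateChecks(c client.Client, oldPeer, newPeer *current.IBPPeer) (bool, error) {
        // Any controller-runtime client satisfies the small common.Client
        // interface, since only List is required.
        if err := commoncontroller.ValidateCRName(c, newPeer.Name, newPeer.Namespace, commoncontroller.IBPPEER); err != nil {
            return false, err
        }

        // Admin cert rotation is intentionally ignored by this helper; it is
        // handled by the component's Initialize flow instead.
        return commoncontroller.MSPInfoUpdateDetected(oldPeer.Spec.Secret, newPeer.Spec.Secret), nil
    }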
file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package controllers + +import ( + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + "sigs.k8s.io/controller-runtime/pkg/manager" +) + +// AddToManagerFuncs is a list of functions to add all Controllers to the Manager +var AddToManagerFuncs []func(manager.Manager, *config.Config) error + +// AddToManager adds all Controllers to the Manager +func AddToManager(m manager.Manager, c *config.Config) error { + for _, f := range AddToManagerFuncs { + if err := f(m, c); err != nil { + return err + } + } + return nil +} diff --git a/controllers/ibpca/ibpca_controller.go b/controllers/ibpca/ibpca_controller.go new file mode 100644 index 00000000..937c618e --- /dev/null +++ b/controllers/ibpca/ibpca_controller.go @@ -0,0 +1,853 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
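controllers/controller.go above is the registration seam: each add_*.go file appends its component's Add function to AddToManagerFuncs from init(), and AddToManager then wires every registered controller to the manager with the shared operator config. The repository's real bootstrap is not part of this excerpt, so the snippet below is only a sketch of the call a manager entrypoint would make; the function name is invented.

    package example

    import (
        "github.com/IBM-Blockchain/fabric-operator/controllers"
        config "github.com/IBM-Blockchain/fabric-operator/operatorconfig"
        ctrl "sigs.k8s.io/controller-runtime"
    )

    // setupControllers shows the registration flow: importing the controllers
    // package runs every add_*.go init(), filling AddToManagerFuncs, and
    // AddToManager then attaches each controller (CA, peer, orderer, console)
    // to the manager with the shared operator config.
    func setupControllers(mgr ctrl.Manager, cfg *config.Config) error {
        return controllers.AddToManager(mgr, cfg)
    }

Adding a new component type would then only require another add_<kind>.go file following the same init() pattern.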
+ */ + +package ibpca + +import ( + "context" + "fmt" + "os" + "reflect" + "strings" + "sync" + "time" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + commoncontroller "github.com/IBM-Blockchain/fabric-operator/controllers/common" + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + "github.com/IBM-Blockchain/fabric-operator/pkg/global" + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering" + baseca "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/ca" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common" + k8sca "github.com/IBM-Blockchain/fabric-operator/pkg/offering/k8s/ca" + openshiftca "github.com/IBM-Blockchain/fabric-operator/pkg/offering/openshift/ca" + "github.com/IBM-Blockchain/fabric-operator/pkg/operatorerrors" + "github.com/IBM-Blockchain/fabric-operator/pkg/restart/staggerrestarts" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + "github.com/go-test/deep" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + yaml "sigs.k8s.io/yaml" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +const ( + KIND = "IBPCA" +) + +var log = logf.Log.WithName("controller_ibpca") + +// Add creates a new IBPCA Controller and adds it to the Manager. The Manager will set fields on the Controller +// and Start it when the Manager is Started. 
+func Add(mgr manager.Manager, cfg *config.Config) error { + r, err := newReconciler(mgr, cfg) + if err != nil { + return err + } + return add(mgr, r) +} + +// newReconciler returns a new reconcile.Reconciler +func newReconciler(mgr manager.Manager, cfg *config.Config) (*ReconcileIBPCA, error) { + client := k8sclient.New(mgr.GetClient(), &global.ConfigSetter{Config: cfg.Operator.Globals}) + scheme := mgr.GetScheme() + + ibpca := &ReconcileIBPCA{ + client: client, + scheme: scheme, + Config: cfg, + update: map[string][]Update{}, + mutex: &sync.Mutex{}, + RestartService: staggerrestarts.New(client, cfg.Operator.Restart.Timeout.Get()), + } + + switch cfg.Offering { + case offering.K8S: + ibpca.Offering = k8sca.New(client, scheme, cfg) + case offering.OPENSHIFT: + ibpca.Offering = openshiftca.New(client, scheme, cfg) + } + + return ibpca, nil +} + +// add adds a new Controller to mgr with r as the reconcile.Reconciler +func add(mgr manager.Manager, r *ReconcileIBPCA) error { + // Create a new controller + predicateFuncs := predicate.Funcs{ + CreateFunc: r.CreateFunc, + UpdateFunc: r.UpdateFunc, + } + + c, err := controller.New("ibpca-controller", mgr, controller.Options{Reconciler: r}) + if err != nil { + return err + } + + // Watch for changes to primary resource IBPCA + err = c.Watch(&source.Kind{Type: &current.IBPCA{}}, &handler.EnqueueRequestForObject{}, predicateFuncs) + if err != nil { + return err + } + + // Watch for changes to config maps (Create and Update funcs handle only watching for restart config map) + err = c.Watch(&source.Kind{Type: &corev1.ConfigMap{}}, &handler.EnqueueRequestForObject{}, predicateFuncs) + if err != nil { + return err + } + + err = c.Watch(&source.Kind{Type: &appsv1.Deployment{}}, &handler.EnqueueRequestForOwner{ + IsController: true, + OwnerType: &current.IBPCA{}, + }) + if err != nil { + return err + } + + // Watch for changes to tertiary resource Secrets and requeue the owner IBPCA + err = c.Watch(&source.Kind{Type: &corev1.Secret{}}, &handler.EnqueueRequestForOwner{ + IsController: true, + OwnerType: &current.IBPCA{}, + }, predicateFuncs) + if err != nil { + return err + } + + return nil +} + +var _ reconcile.Reconciler = &ReconcileIBPCA{} + +//go:generate counterfeiter -o mocks/careconcile.go -fake-name CAReconcile . caReconcile +//counterfeiter:generate . caReconcile +type caReconcile interface { + Reconcile(*current.IBPCA, baseca.Update) (common.Result, error) +} + +// ReconcileIBPCA reconciles an IBPCA object +type ReconcileIBPCA struct { + // This client, initialized using mgr.GetClient() above, is a split client + // that reads objects from the cache and writes to the apiserver + client k8sclient.Client + scheme *runtime.Scheme + + Offering caReconcile + Config *config.Config + RestartService *staggerrestarts.StaggerRestartsService + + update map[string][]Update + mutex *sync.Mutex +} + +// Reconcile reads the state of the cluster for an IBPCA object and makes changes based on the state read +// and what is in the IBPCA.Spec +// Note: +// The Controller will requeue the Request to be processed again if the returned error is non-nil or +// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
+// +kubebuilder:rbac:groups=apiextensions.k8s.io,resources=persistentvolumeclaims;persistentvolumes,verbs=get;list;watch;create;update;patch;delete;deletecollection +// +kubebuilder:rbac:groups=apiextensions.k8s.io,resources=customresourcedefinitions,verbs=get +// +kubebuilder:rbac:groups=route.openshift.io,resources=routes;routes/custom-host,verbs=get;list;watch;create;update;patch;delete;deletecollection +// +kubebuilder:rbac:groups="",resources=pods;pods/log;persistentvolumeclaims;persistentvolumes;services;endpoints;events;configmaps;secrets;nodes;serviceaccounts,verbs=get;list;watch;create;update;patch;delete;deletecollection +// +kubebuilder:rbac:groups="batch",resources=jobs,verbs=get;list;watch;create;update;patch;delete;deletecollection +// +kubebuilder:rbac:groups="authorization.openshift.io";"rbac.authorization.k8s.io",resources=roles;rolebinding,verbs=get;list;watch;create;update;patch;delete;deletecollection;bind;escalate +// +kubebuilder:rbac:groups="",resources=namespaces,verbs=get +// +kubebuilder:rbac:groups=apps,resources=deployments;daemonsets;replicasets;statefulsets,verbs=get;list;watch;create;update;patch;delete;deletecollection +// +kubebuilder:rbac:groups=monitoring.coreos.com,resources=servicemonitors,verbs=get;create +// +kubebuilder:rbac:groups=apps,resourceNames=ibp-operator,resources=deployments/finalizers,verbs=update +// +kubebuilder:rbac:groups=ibp.com,resources=ibpcas.ibp.com;ibppeers.ibp.com;ibporderers.ibp.com;ibpcas;ibppeers;ibporderers;ibpconsoles;ibpcas/finalizers;ibppeer/finalizers;ibporderers/finalizers;ibpconsole/finalizers;ibpcas/status;ibppeers/status;ibporderers/status;ibpconsoles/status,verbs=get;list;watch;create;update;patch;delete;deletecollection +// +kubebuilder:rbac:groups=extensions;networking.k8s.io;config.openshift.io,resources=ingresses;networkpolicies,verbs=get;list;watch;create;update;patch;delete;deletecollection +// +kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;list;watch;create;update;patch;delete;deletecollection +func (r *ReconcileIBPCA) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { + var err error + + reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name) + + // If ca-restart-config configmap is the object being reconciled, reconcile the + // restart configmap. + if request.Name == "ca-restart-config" { + requeue, err := r.ReconcileRestart(request.Namespace) + // Error reconciling restart - requeue the request. + if err != nil { + return reconcile.Result{}, err + } + // Restart reconciled, requeue request if required. + return reconcile.Result{ + Requeue: requeue, + }, nil + } + + reqLogger.Info("Reconciling IBPCA") + + // Fetch the IBPCA instance + instance := &current.IBPCA{} + err = r.client.Get(context.TODO(), request.NamespacedName, instance) + if err != nil { + if k8serrors.IsNotFound(err) { + // Request object not found, could have been deleted after reconcile request. + // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. + // Return and don't requeue + return reconcile.Result{}, nil + } + // Error reading the object - requeue the request.
+ return reconcile.Result{}, err + } + + reqLogger.Info(fmt.Sprintf("Current update stack to process: %+v", GetUpdateStack(r.update))) + + update := r.GetUpdateStatus(instance) + reqLogger.Info(fmt.Sprintf("Reconciling IBPCA '%s' with update values of [ %+v ]", instance.GetName(), update.GetUpdateStackWithTrues())) + + result, err := r.Offering.Reconcile(instance, r.PopUpdate(instance.GetName())) + setStatusErr := r.SetStatus(instance, result.Status, err) + if setStatusErr != nil { + return reconcile.Result{}, operatorerrors.IsBreakingError(setStatusErr, "failed to update status", log) + } + + if err != nil { + return reconcile.Result{}, operatorerrors.IsBreakingError(errors.Wrapf(err, "CA instance '%s' encountered error", instance.GetName()), "stopping reconcile loop", log) + } + + if result.Requeue { + r.PushUpdate(instance.GetName(), *update) + } + + reqLogger.Info(fmt.Sprintf("Finished reconciling IBPCA '%s' with update values of [ %+v ]", instance.GetName(), update.GetUpdateStackWithTrues())) + + // If the stack still has items that require processing, keep reconciling + // until the stack has been cleared + _, found := r.update[instance.GetName()] + if found { + if len(r.update[instance.GetName()]) > 0 { + return reconcile.Result{ + Requeue: true, + }, nil + } + } + + return result.Result, nil +} + +func (r *ReconcileIBPCA) SetStatus(instance *current.IBPCA, reconcileStatus *current.CRStatus, reconcileErr error) error { + log.Info(fmt.Sprintf("Setting status for '%s'", instance.GetName())) + + err := r.SaveSpecState(instance) + if err != nil { + return errors.Wrap(err, "failed to save spec state") + } + + err = r.client.Get(context.TODO(), types.NamespacedName{Name: instance.GetName(), Namespace: instance.GetNamespace()}, instance) + if err != nil { + return err + } + + status := instance.Status.CRStatus + + if reconcileErr != nil { + status.Type = current.Error + status.Status = current.True + status.Reason = "errorOccurredDuringReconcile" + status.Message = reconcileErr.Error() + status.LastHeartbeatTime = time.Now().String() + status.ErrorCode = operatorerrors.GetErrorCode(reconcileErr) + + instance.Status = current.IBPCAStatus{ + CRStatus: status, + } + + log.Info(fmt.Sprintf("Updating status of IBPCA custom resource to %s phase", instance.Status.Type)) + err = r.client.PatchStatus(context.TODO(), instance, nil, k8sclient.PatchOption{ + Resilient: &k8sclient.ResilientPatch{ + Retry: 2, + Into: &current.IBPCA{}, + Strategy: client.MergeFrom, + }, + }) + if err != nil { + return err + } + + return nil + } + + status.Versions.Reconciled = instance.Spec.FabricVersion + + // Check if reconcile loop returned an updated status that differs from existing status. + // If so, set status to the reconcile status.
+ if reconcileStatus != nil { + if instance.Status.Type != reconcileStatus.Type || instance.Status.Reason != reconcileStatus.Reason || instance.Status.Message != reconcileStatus.Message { + status.Type = reconcileStatus.Type + status.Status = current.True + status.Reason = reconcileStatus.Reason + status.Message = reconcileStatus.Message + status.LastHeartbeatTime = time.Now().String() + + instance.Status = current.IBPCAStatus{ + CRStatus: status, + } + + log.Info(fmt.Sprintf("Updating status of IBPCA custom resource to %s phase", instance.Status.Type)) + err := r.client.PatchStatus(context.TODO(), instance, nil, k8sclient.PatchOption{ + Resilient: &k8sclient.ResilientPatch{ + Retry: 2, + Into: &current.IBPCA{}, + Strategy: client.MergeFrom, + }, + }) + if err != nil { + return err + } + + return nil + } + } + + running, err := r.PodsRunning(instance) + if err != nil { + return err + } + + if running { + if instance.Status.Type == current.Deployed { + return nil + } + status.Type = current.Deployed + status.Status = current.True + status.Reason = "allPodsRunning" + status.Message = "All pods running" + } else { + if instance.Status.Type == current.Deploying { + return nil + } + status.Type = current.Deploying + status.Status = current.True + status.Reason = "waitingForPods" + status.Message = "Waiting for pods" + } + + instance.Status = current.IBPCAStatus{ + CRStatus: status, + } + instance.Status.LastHeartbeatTime = time.Now().String() + log.Info(fmt.Sprintf("Updating status of IBPCA custom resource to %s phase", instance.Status.Type)) + err = r.client.PatchStatus(context.TODO(), instance, nil, k8sclient.PatchOption{ + Resilient: &k8sclient.ResilientPatch{ + Retry: 2, + Into: &current.IBPCA{}, + Strategy: client.MergeFrom, + }, + }) + if err != nil { + return err + } + + return nil +} + +func (r *ReconcileIBPCA) SaveSpecState(instance *current.IBPCA) error { + data, err := yaml.Marshal(instance.Spec) + if err != nil { + return err + } + + cm := &corev1.ConfigMap{ + ObjectMeta: v1.ObjectMeta{ + Name: fmt.Sprintf("%s-spec", instance.GetName()), + Namespace: instance.GetNamespace(), + Labels: instance.GetLabels(), + }, + BinaryData: map[string][]byte{ + "spec": data, + }, + } + + err = r.client.CreateOrUpdate(context.TODO(), cm, k8sclient.CreateOrUpdateOption{ + Owner: instance, + Scheme: r.scheme, + }) + if err != nil { + return err + } + + return nil +} + +func (r *ReconcileIBPCA) GetSpecState(instance *current.IBPCA) (*corev1.ConfigMap, error) { + cm := &corev1.ConfigMap{} + nn := types.NamespacedName{ + Name: fmt.Sprintf("%s-spec", instance.GetName()), + Namespace: instance.GetNamespace(), + } + + err := r.client.Get(context.TODO(), nn, cm) + if err != nil { + return nil, err + } + + return cm, nil +} + +func (r *ReconcileIBPCA) PodsRunning(instance *current.IBPCA) (bool, error) { + labelSelector, err := labels.Parse(fmt.Sprintf("app=%s", instance.GetName())) + if err != nil { + return false, errors.Wrap(err, "failed to parse label selector for app name") + } + + listOptions := &client.ListOptions{ + LabelSelector: labelSelector, + Namespace: instance.GetNamespace(), + } + + podList := &corev1.PodList{} + err = r.client.List(context.TODO(), podList, listOptions) + if err != nil { + return false, err + } + + if len(podList.Items) == 0 { + return false, nil + } + + for _, pod := range podList.Items { + if pod.Status.Phase != corev1.PodRunning { + return false, nil + } + } + + return true, nil +} + +func (r *ReconcileIBPCA) getIgnoreDiffs() []string { + return []string{ +
`Template\.Spec\.Containers\.slice\[\d\]\.Resources\.Requests\.map\[memory\].s`, + `Template\.Spec\.InitContainers\.slice\[\d\]\.Resources`, + `Ports\.slice\[\d\]\.Protocol`, + } +} + +func (r *ReconcileIBPCA) getLabels(instance v1.Object) map[string]string { + label := os.Getenv("OPERATOR_LABEL_PREFIX") + if label == "" { + label = "fabric" + } + + return map[string]string{ + "app": instance.GetName(), + "creator": label, + "release": "operator", + "helm.sh/chart": "ibm-" + label, + "app.kubernetes.io/name": label, + "app.kubernetes.io/instance": label + "ca", + "app.kubernetes.io/managed-by": label + "-operator", + } +} + +func (r *ReconcileIBPCA) getSelectorLabels(instance v1.Object) map[string]string { + return map[string]string{ + "app": instance.GetName(), + } +} + +// TODO: Move to predicate.go +func (r *ReconcileIBPCA) CreateFunc(e event.CreateEvent) bool { + update := Update{} + + switch e.Object.(type) { + case *current.IBPCA: + ca := e.Object.(*current.IBPCA) + log.Info(fmt.Sprintf("Create event detected for ca '%s'", ca.GetName())) + + // Operator restart detected, want to trigger update logic for CA resource if changes detected + if ca.Status.HasType() { + log.Info(fmt.Sprintf("Operator restart detected, running update flow on existing ca '%s'", ca.GetName())) + + // Get the spec state of the resource before the operator went down, this + // will be used to compare to see if the spec of resources has changed + cm, err := r.GetSpecState(ca) + if err != nil { + log.Info(fmt.Sprintf("Failed getting saved ca spec '%s', triggering create: %s", ca.GetName(), err.Error())) + return true + } + + specBytes := cm.BinaryData["spec"] + existingCA := &current.IBPCA{} + err = yaml.Unmarshal(specBytes, &existingCA.Spec) + if err != nil { + log.Info(fmt.Sprintf("Unmarshal failed for saved ca spec '%s', triggering create: %s", ca.GetName(), err.Error())) + return true + } + + diff := deep.Equal(ca.Spec, existingCA.Spec) + if diff != nil { + log.Info(fmt.Sprintf("IBPCA '%s' spec was updated while operator was down", ca.GetName())) + log.Info(fmt.Sprintf("Difference detected: %s", diff)) + update.specUpdated = true + } + + // If existing CA spec did not have config overrides defined but new spec does, + // trigger update logic for both CA and TLSCA overrides + if ca.Spec.ConfigOverride == nil && existingCA.Spec.ConfigOverride != nil { + log.Info(fmt.Sprintf("IBPCA '%s' CA and TLSCA overrides were updated while operator was down", ca.GetName())) + update.caOverridesUpdated = true + update.tlscaOverridesUpdated = true + } + + // If existing CA spec had config overrides defined, need to further check to see if CA or + // TLSCA specs have been updated and trigger update for the one on which updates are detected.
+			if ca.Spec.ConfigOverride != nil && existingCA.Spec.ConfigOverride != nil {
+				if ca.Spec.ConfigOverride.CA != nil && existingCA.Spec.ConfigOverride.CA != nil {
+					if !reflect.DeepEqual(ca.Spec.ConfigOverride.CA, existingCA.Spec.ConfigOverride.CA) {
+						log.Info(fmt.Sprintf("IBPCA '%s' CA overrides were updated while operator was down", ca.GetName()))
+						update.caOverridesUpdated = true
+					}
+				}
+
+				if ca.Spec.ConfigOverride.TLSCA != nil && existingCA.Spec.ConfigOverride.TLSCA != nil {
+					if !reflect.DeepEqual(ca.Spec.ConfigOverride.TLSCA, existingCA.Spec.ConfigOverride.TLSCA) {
+						log.Info(fmt.Sprintf("IBPCA '%s' TLSCA overrides were updated while operator was down", ca.GetName()))
+						update.tlscaOverridesUpdated = true
+					}
+				}
+			}
+
+			update.imagesUpdated = imagesUpdated(existingCA, ca)
+			update.fabricVersionUpdated = fabricVersionUpdated(existingCA, ca)
+
+			log.Info(fmt.Sprintf("Create event triggering reconcile for updating ca '%s'", ca.GetName()))
+			r.PushUpdate(ca.GetName(), update)
+			return true
+		}
+
+		// TODO: This seems more appropriate for the PreReconcileCheck method rather than the predicate function. Not
+		// sure if there was reason for putting it here, but if not we should consider moving it
+		//
+		// If creating resource for the first time, check that a unique name is provided
+		err := commoncontroller.ValidateCRName(r.client, ca.Name, ca.Namespace, commoncontroller.IBPCA)
+		if err != nil {
+			log.Error(err, "failed to validate ca name")
+			operror := operatorerrors.Wrap(err, operatorerrors.InvalidCustomResourceCreateRequest, "failed to validate custom resource name")
+
+			err = r.SetStatus(ca, nil, operror)
+			if err != nil {
+				log.Error(err, "failed to set status to error", "ca.name", ca.Name, "error", "InvalidCustomResourceCreateRequest")
+			}
+			return false
+		}
+
+		log.Info(fmt.Sprintf("Create event triggering reconcile for creating ca '%s'", ca.GetName()))
+
+	case *corev1.Secret:
+		secret := e.Object.(*corev1.Secret)
+
+		if secret.OwnerReferences == nil || len(secret.OwnerReferences) == 0 {
+			isCASecret, err := r.AddOwnerReferenceToSecret(secret)
+			if err != nil || !isCASecret {
+				return false
+			}
+		}
+
+		if secret.OwnerReferences[0].Kind == KIND {
+			instanceName := secret.OwnerReferences[0].Name
+			log.Info(fmt.Sprintf("Create event detected for secret '%s'", secret.GetName()))
+
+			if strings.HasSuffix(secret.Name, "-ca-crypto") {
+				update.caCryptoCreated = true
+				log.Info(fmt.Sprintf("CA crypto created, triggering reconcile for IBPCA custom resource %s: update [ %+v ]", instanceName, update.GetUpdateStackWithTrues()))
+			} else {
+				return false
+			}
+
+			r.PushUpdate(instanceName, update)
+		}
+
+	case *appsv1.Deployment:
+		dep := e.Object.(*appsv1.Deployment)
+		log.Info(fmt.Sprintf("Create event detected by IBPCA controller for deployment '%s', triggering reconcile", dep.GetName()))
+
+	case *corev1.ConfigMap:
+		cm := e.Object.(*corev1.ConfigMap)
+		if cm.Name == "ca-restart-config" {
+			log.Info(fmt.Sprintf("Create event detected by IBPCA controller for config map '%s', triggering restart reconcile", cm.GetName()))
+		} else {
+			return false
+		}
+	}
+
+	return true
+}
+
+// TODO: Move to predicate.go
+func (r *ReconcileIBPCA) UpdateFunc(e event.UpdateEvent) bool {
+	update := Update{}
+
+	switch e.ObjectOld.(type) {
+	case *current.IBPCA:
+		oldCA := e.ObjectOld.(*current.IBPCA)
+		newCA := e.ObjectNew.(*current.IBPCA)
+		log.Info(fmt.Sprintf("Update event detected for ca '%s'", oldCA.GetName()))
+
+		if util.CheckIfZoneOrRegionUpdated(oldCA.Spec.Zone, newCA.Spec.Zone) {
+
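+			// Zone and region are treated as immutable once set; an attempted change is
+			// logged and the update event is dropped rather than reconciled.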
log.Error(errors.New("Zone update is not allowed"), "invalid spec update") + return false + } + + if util.CheckIfZoneOrRegionUpdated(oldCA.Spec.Region, newCA.Spec.Region) { + log.Error(errors.New("Region update is not allowed"), "invalid spec update") + return false + } + + if reflect.DeepEqual(oldCA.Spec, newCA.Spec) { + return false + } + + update.specUpdated = true + + // Check for changes to ca tag to determine if any migration logic needs to be executed + if oldCA.Spec.Images != nil && newCA.Spec.Images != nil { + if oldCA.Spec.Images.CATag != newCA.Spec.Images.CATag { + log.Info(fmt.Sprintf("CA tag update from %s to %s", oldCA.Spec.Images.CATag, newCA.Spec.Images.CATag)) + update.caTagUpdated = true + } + } + + if oldCA.Spec.ConfigOverride == nil { + if newCA.Spec.ConfigOverride != nil { + update.caOverridesUpdated = true + update.tlscaOverridesUpdated = true + } + } else { + if !reflect.DeepEqual(oldCA.Spec.ConfigOverride.CA, newCA.Spec.ConfigOverride.CA) { + update.caOverridesUpdated = true + } + + if !reflect.DeepEqual(oldCA.Spec.ConfigOverride.TLSCA, newCA.Spec.ConfigOverride.TLSCA) { + update.tlscaOverridesUpdated = true + } + } + + if newCA.Spec.Action.Restart == true { + update.restartNeeded = true + } + + if newCA.Spec.Action.Renew.TLSCert == true { + update.renewTLSCert = true + } + + update.imagesUpdated = imagesUpdated(oldCA, newCA) + update.fabricVersionUpdated = fabricVersionUpdated(oldCA, newCA) + + log.Info(fmt.Sprintf("Spec update triggering reconcile on IBPCA custom resource %s: update [ %+v ]", oldCA.Name, update.GetUpdateStackWithTrues())) + r.PushUpdate(oldCA.GetName(), update) + return true + + case *corev1.Secret: + oldSecret := e.ObjectOld.(*corev1.Secret) + newSecret := e.ObjectNew.(*corev1.Secret) + + if oldSecret.OwnerReferences == nil || len(oldSecret.OwnerReferences) == 0 { + isCASecret, err := r.AddOwnerReferenceToSecret(oldSecret) + if err != nil || !isCASecret { + return false + } + } + + if oldSecret.OwnerReferences[0].Kind == KIND { + if reflect.DeepEqual(oldSecret.Data, newSecret.Data) { + return false + } + + instanceName := oldSecret.OwnerReferences[0].Name + log.Info(fmt.Sprintf("Update event detected for secret '%s'", oldSecret.GetName())) + + if util.IsSecretTLSCert(oldSecret.Name) { + update.caCryptoUpdated = true + } else { + return false + } + + log.Info(fmt.Sprintf("CA crypto update triggering reconcile on IBPCA custom resource %s: update [ %+v ]", instanceName, update.GetUpdateStackWithTrues())) + r.PushUpdate(instanceName, update) + return true + } + + case *appsv1.Deployment: + dep := e.ObjectOld.(*appsv1.Deployment) + log.Info(fmt.Sprintf("Spec update detected by IBPCA controller for deployment '%s'", dep.GetName())) + + case *corev1.ConfigMap: + cm := e.ObjectOld.(*corev1.ConfigMap) + if cm.Name == "ca-restart-config" { + log.Info("Update event detected for ca-restart-config, triggering restart reconcile") + return true + } + + } + + return false +} + +func (r *ReconcileIBPCA) GetUpdateStatusAtElement(instance *current.IBPCA, index int) *Update { + r.mutex.Lock() + defer r.mutex.Unlock() + + update := Update{} + _, ok := r.update[instance.GetName()] + if !ok { + return &update + } + + if len(r.update[instance.GetName()]) >= 1 { + update = r.update[instance.GetName()][index] + } + + return &update +} + +func (r *ReconcileIBPCA) GetUpdateStatus(instance *current.IBPCA) *Update { + return r.GetUpdateStatusAtElement(instance, 0) +} + +func (r *ReconcileIBPCA) PushUpdate(instance string, update Update) { + r.mutex.Lock() + defer 
r.mutex.Unlock()
+
+	r.update[instance] = r.AppendUpdateIfMissing(r.update[instance], update)
+}
+
+func (r *ReconcileIBPCA) PopUpdate(instance string) *Update {
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+
+	update := Update{}
+	if len(r.update[instance]) >= 1 {
+		update = r.update[instance][0]
+		if len(r.update[instance]) == 1 {
+			r.update[instance] = []Update{}
+		} else {
+			r.update[instance] = r.update[instance][1:]
+		}
+	}
+
+	return &update
+}
+
+func (r *ReconcileIBPCA) AppendUpdateIfMissing(updates []Update, update Update) []Update {
+	for _, u := range updates {
+		if u == update {
+			return updates
+		}
+	}
+	return append(updates, update)
+}
+
+func (r *ReconcileIBPCA) AddOwnerReferenceToSecret(secret *corev1.Secret) (bool, error) {
+	// CA secrets we are looking to add owner references to are named:
+	// <instance name>-ca
+	// <instance name>-ca-crypto
+	// <instance name>-tlsca
+	// <instance name>-tlsca-crypto
+
+	items := strings.Split(secret.Name, "-")
+	var instanceName string
+
+	if strings.Contains(secret.Name, "-ca-crypto") || strings.Contains(secret.Name, "-tlsca-crypto") {
+		// If -ca-crypto or -tlsca-crypto, construct instance name from all but last 2 items
+		instanceName = strings.Join(items[:len(items)-2], "-")
+	} else if strings.Contains(secret.Name, "-ca") || strings.Contains(secret.Name, "-tlsca") {
+		// If -ca or -tlsca, construct instance name from all but last item
+		instanceName = strings.Join(items[:len(items)-1], "-")
+	} else {
+		return false, nil
+	}
+
+	listOptions := &client.ListOptions{
+		Namespace: secret.Namespace,
+	}
+
+	caList := &current.IBPCAList{}
+	err := r.client.List(context.TODO(), caList, listOptions)
+	if err != nil {
+		return false, errors.Wrap(err, "failed to get list of CAs")
+	}
+
+	for _, o := range caList.Items {
+		ca := o
+		if ca.Name == instanceName {
+			// Instance found in list of CAs
+			err = r.client.Update(context.TODO(), secret, k8sclient.UpdateOption{
+				Owner:  &ca,
+				Scheme: r.scheme,
+			})
+			if err != nil {
+				return false, err
+			}
+			return true, nil
+		}
+	}
+
+	return false, nil
+}
+
+func (r *ReconcileIBPCA) SetupWithManager(mgr ctrl.Manager) error {
+	return ctrl.NewControllerManagedBy(mgr).
+		For(&current.IBPCA{}).
+		Complete(r)
+}
+
+func GetUpdateStack(allUpdates map[string][]Update) string {
+	stack := ""
+
+	for orderer, updates := range allUpdates {
+		currentStack := ""
+		for index, update := range updates {
+			currentStack += fmt.Sprintf("{ %s}", update.GetUpdateStackWithTrues())
+			if index != len(updates)-1 {
+				currentStack += " , "
+			}
+		}
+		stack += fmt.Sprintf("%s: [ %s ] ", orderer, currentStack)
+	}
+
+	return stack
+}
+
+func (r *ReconcileIBPCA) ReconcileRestart(namespace string) (bool, error) {
+	requeue, err := r.RestartService.Reconcile("ca", namespace)
+	if err != nil {
+		log.Error(err, "failed to reconcile restart queues in ca-restart-config")
+		return false, err
+	}
+
+	return requeue, nil
+}
diff --git a/controllers/ibpca/ibpca_controller_test.go b/controllers/ibpca/ibpca_controller_test.go
new file mode 100644
index 00000000..d00af8a3
--- /dev/null
+++ b/controllers/ibpca/ibpca_controller_test.go
@@ -0,0 +1,355 @@
+/*
+ * Copyright contributors to the Hyperledger Fabric Operator project
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package ibpca + +import ( + "context" + "errors" + "fmt" + "sync" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/ca/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + camocks "github.com/IBM-Blockchain/fabric-operator/controllers/ibpca/mocks" + "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + "github.com/IBM-Blockchain/fabric-operator/pkg/operatorerrors" + corev1 "k8s.io/api/core/v1" + k8serror "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var _ = Describe("ReconcileIBPCA", func() { + var ( + reconciler *ReconcileIBPCA + request reconcile.Request + mockKubeClient *mocks.Client + mockCAReconcile *camocks.CAReconcile + instance *current.IBPCA + ) + + BeforeEach(func() { + mockKubeClient = &mocks.Client{} + mockCAReconcile = &camocks.CAReconcile{} + instance = ¤t.IBPCA{ + Spec: current.IBPCASpec{}, + } + instance.Name = "test-ca" + + mockKubeClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj client.Object) error { + switch obj.(type) { + case *current.IBPCA: + o := obj.(*current.IBPCA) + o.Kind = "IBPCA" + o.Name = instance.Name + + instance.Status = o.Status + } + return nil + } + + mockKubeClient.UpdateStatusStub = func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + switch obj.(type) { + case *current.IBPCA: + o := obj.(*current.IBPCA) + instance.Status = o.Status + } + return nil + } + + mockKubeClient.ListStub = func(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) error { + switch obj.(type) { + case *corev1.NodeList: + nodeList := obj.(*corev1.NodeList) + node := corev1.Node{} + node.Labels = map[string]string{} + node.Labels["topology.kubernetes.io/zone"] = "dal" + node.Labels["topology.kubernetes.io/region"] = "us-south" + nodeList.Items = append(nodeList.Items, node) + case *current.IBPCAList: + caList := obj.(*current.IBPCAList) + ca1 := current.IBPCA{} + ca1.Name = "test-ca1" + ca2 := current.IBPCA{} + ca2.Name = "test-ca2" + ca3 := current.IBPCA{} + ca3.Name = "test-ca2" + caList.Items = []current.IBPCA{ca1, ca2, ca3} + case *current.IBPPeerList: + caList := obj.(*current.IBPPeerList) + p1 := current.IBPPeer{} + p1.Name = "test-peer" + caList.Items = []current.IBPPeer{p1} + } + return nil + } + + reconciler = &ReconcileIBPCA{ + Offering: mockCAReconcile, + client: mockKubeClient, + scheme: &runtime.Scheme{}, + update: map[string][]Update{}, + mutex: &sync.Mutex{}, + } + request = reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: "test-namespace", + Name: "test", + }, + } + }) + + Context("Reconciles", func() { + It("does not return an error if the custom resource is 'not found'", 
func() { + notFoundErr := &k8serror.StatusError{ + ErrStatus: metav1.Status{ + Reason: metav1.StatusReasonNotFound, + }, + } + mockKubeClient.GetReturns(notFoundErr) + _, err := reconciler.Reconcile(context.TODO(), request) + Expect(err).NotTo(HaveOccurred()) + }) + + It("returns an error if the request to get custom resource return any other error besides 'not found'", func() { + alreadyExistsErr := &k8serror.StatusError{ + ErrStatus: metav1.Status{ + Message: "already exists", + Reason: metav1.StatusReasonAlreadyExists, + }, + } + mockKubeClient.GetReturns(alreadyExistsErr) + _, err := reconciler.Reconcile(context.TODO(), request) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("already exists")) + }) + + It("returns an error if it encountered a non-breaking error", func() { + errMsg := "failed to reconcile deployment encountered breaking error" + mockCAReconcile.ReconcileReturns(common.Result{}, errors.New(errMsg)) + _, err := reconciler.Reconcile(context.TODO(), request) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(fmt.Sprintf("CA instance '%s' encountered error: %s", instance.Name, errMsg))) + }) + + It("does not return an error if it encountered a breaking error", func() { + mockCAReconcile.ReconcileReturns(common.Result{}, operatorerrors.New(operatorerrors.InvalidDeploymentCreateRequest, "failed to reconcile deployment encountered breaking error")) + _, err := reconciler.Reconcile(context.TODO(), request) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("update reconcile", func() { + var ( + oldCA *current.IBPCA + newCA *current.IBPCA + e event.UpdateEvent + ) + + BeforeEach(func() { + caConfig := &v1.ServerConfig{ + CAConfig: v1.CAConfig{ + CA: v1.CAInfo{ + Name: "old-ca-name", + }, + }, + } + caJson, err := util.ConvertToJsonMessage(caConfig) + Expect(err).NotTo(HaveOccurred()) + + oldCA = ¤t.IBPCA{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance.Name, + }, + Spec: current.IBPCASpec{ + ConfigOverride: ¤t.ConfigOverride{ + CA: &runtime.RawExtension{Raw: *caJson}, + }, + }, + } + + newcaConfig := &v1.ServerConfig{ + CAConfig: v1.CAConfig{ + CA: v1.CAInfo{ + Name: "new-ca-name", + }, + }, + } + newcaJson, err := util.ConvertToJsonMessage(newcaConfig) + Expect(err).NotTo(HaveOccurred()) + + newCA = ¤t.IBPCA{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance.Name, + }, + Spec: current.IBPCASpec{ + ConfigOverride: ¤t.ConfigOverride{ + CA: &runtime.RawExtension{Raw: *newcaJson}, + }, + }, + } + + e = event.UpdateEvent{ + ObjectOld: oldCA, + ObjectNew: newCA, + } + + Expect(reconciler.UpdateFunc(e)).To(Equal(true)) + + oldCA = ¤t.IBPCA{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance.Name, + }, + Spec: current.IBPCASpec{ + ImagePullSecrets: []string{"old-secret"}, + }, + } + + newCA = ¤t.IBPCA{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance.Name, + }, + Spec: current.IBPCASpec{ + ImagePullSecrets: []string{"new-secret"}, + }, + } + + e = event.UpdateEvent{ + ObjectOld: oldCA, + ObjectNew: newCA, + } + + Expect(reconciler.UpdateFunc(e)).To(Equal(true)) + }) + + It("properly pops update flags from stack", func() { + Expect(reconciler.GetUpdateStatus(instance).CAOverridesUpdated()).To(Equal(true)) + + _, err := reconciler.Reconcile(context.TODO(), request) + Expect(err).NotTo(HaveOccurred()) + + Expect(reconciler.GetUpdateStatus(instance).CAOverridesUpdated()).To(Equal(false)) + Expect(reconciler.GetUpdateStatus(instance).SpecUpdated()).To(Equal(true)) + + _, err = reconciler.Reconcile(context.TODO(), request) + 
Expect(err).NotTo(HaveOccurred()) + + Expect(reconciler.GetUpdateStatus(instance).CAOverridesUpdated()).To(Equal(false)) + Expect(reconciler.GetUpdateStatus(instance).TLSCAOverridesUpdated()).To(Equal(false)) + Expect(reconciler.GetUpdateStatus(instance).SpecUpdated()).To(Equal(false)) + }) + }) + + Context("set status", func() { + It("sets the status to error if error occured during IBPCA reconciliation", func() { + reconciler.SetStatus(instance, nil, errors.New("ibpca error")) + Expect(instance.Status.Type).To(Equal(current.Error)) + Expect(instance.Status.Message).To(Equal("ibpca error")) + }) + + It("sets the status to deploying if pod is not yet running", func() { + mockKubeClient.ListStub = func(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) error { + podList := obj.(*corev1.PodList) + pod := corev1.Pod{} + podList.Items = append(podList.Items, pod) + return nil + } + reconciler.SetStatus(instance, nil, nil) + Expect(instance.Status.Type).To(Equal(current.Deploying)) + }) + + It("sets the status to deployed if pod is running", func() { + mockKubeClient.ListStub = func(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) error { + podList := obj.(*corev1.PodList) + pod := corev1.Pod{ + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + }, + } + podList.Items = append(podList.Items, pod) + return nil + } + + reconciler.SetStatus(instance, nil, nil) + Expect(instance.Status.Type).To(Equal(current.Deployed)) + }) + }) + + Context("add owner reference to secret", func() { + var ( + secret *corev1.Secret + ) + + BeforeEach(func() { + secret = &corev1.Secret{} + secret.Name = "test-ca1-ca-crypto" + }) + + It("returns error if fails to get list of CAs", func() { + mockKubeClient.ListReturns(errors.New("list error")) + _, err := reconciler.AddOwnerReferenceToSecret(secret) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("list error")) + }) + + It("returns false if secret doesn't belong to any CAs in list", func() { + secret.Name = "invalidca-ca-crypto" + added, err := reconciler.AddOwnerReferenceToSecret(secret) + Expect(err).NotTo(HaveOccurred()) + Expect(added).To(Equal(false)) + }) + + It("returns true if owner references added to ca crypto secret", func() { + added, err := reconciler.AddOwnerReferenceToSecret(secret) + Expect(err).NotTo(HaveOccurred()) + Expect(added).To(Equal(true)) + }) + + It("returns true if owner references added to tlsca crypto secret", func() { + secret.Name = "test-ca2-tlsca-crypto" + added, err := reconciler.AddOwnerReferenceToSecret(secret) + Expect(err).NotTo(HaveOccurred()) + Expect(added).To(Equal(true)) + }) + + It("returns true if owner references added to ca secret", func() { + secret.Name = "test-ca2-ca" + added, err := reconciler.AddOwnerReferenceToSecret(secret) + Expect(err).NotTo(HaveOccurred()) + Expect(added).To(Equal(true)) + }) + + It("returns true if owner references added to tlsca secret", func() { + secret.Name = "test-ca2-tlsca" + added, err := reconciler.AddOwnerReferenceToSecret(secret) + Expect(err).NotTo(HaveOccurred()) + Expect(added).To(Equal(true)) + }) + }) +}) diff --git a/controllers/ibpca/ibpca_suite_test.go b/controllers/ibpca/ibpca_suite_test.go new file mode 100644 index 00000000..a9c9ae47 --- /dev/null +++ b/controllers/ibpca/ibpca_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you 
may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package ibpca_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestIbpca(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Ibpca Suite") +} diff --git a/controllers/ibpca/mocks/careconcile.go b/controllers/ibpca/mocks/careconcile.go new file mode 100644 index 00000000..dfc2e9e5 --- /dev/null +++ b/controllers/ibpca/mocks/careconcile.go @@ -0,0 +1,118 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package mocks + +import ( + "sync" + + "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + baseca "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/ca" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common" +) + +type CAReconcile struct { + ReconcileStub func(*v1beta1.IBPCA, baseca.Update) (common.Result, error) + reconcileMutex sync.RWMutex + reconcileArgsForCall []struct { + arg1 *v1beta1.IBPCA + arg2 baseca.Update + } + reconcileReturns struct { + result1 common.Result + result2 error + } + reconcileReturnsOnCall map[int]struct { + result1 common.Result + result2 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *CAReconcile) Reconcile(arg1 *v1beta1.IBPCA, arg2 baseca.Update) (common.Result, error) { + fake.reconcileMutex.Lock() + ret, specificReturn := fake.reconcileReturnsOnCall[len(fake.reconcileArgsForCall)] + fake.reconcileArgsForCall = append(fake.reconcileArgsForCall, struct { + arg1 *v1beta1.IBPCA + arg2 baseca.Update + }{arg1, arg2}) + stub := fake.ReconcileStub + fakeReturns := fake.reconcileReturns + fake.recordInvocation("Reconcile", []interface{}{arg1, arg2}) + fake.reconcileMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *CAReconcile) ReconcileCallCount() int { + fake.reconcileMutex.RLock() + defer fake.reconcileMutex.RUnlock() + return len(fake.reconcileArgsForCall) +} + +func (fake *CAReconcile) ReconcileCalls(stub func(*v1beta1.IBPCA, baseca.Update) (common.Result, error)) { + fake.reconcileMutex.Lock() + defer fake.reconcileMutex.Unlock() + fake.ReconcileStub = stub +} + +func (fake *CAReconcile) ReconcileArgsForCall(i int) (*v1beta1.IBPCA, baseca.Update) { + fake.reconcileMutex.RLock() + defer fake.reconcileMutex.RUnlock() + argsForCall := fake.reconcileArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *CAReconcile) ReconcileReturns(result1 common.Result, result2 error) { + fake.reconcileMutex.Lock() + defer fake.reconcileMutex.Unlock() + fake.ReconcileStub = nil + fake.reconcileReturns = struct { + result1 common.Result + result2 error + }{result1, result2} +} + +func (fake *CAReconcile) ReconcileReturnsOnCall(i int, result1 common.Result, result2 error) { + fake.reconcileMutex.Lock() + defer fake.reconcileMutex.Unlock() + fake.ReconcileStub = nil + if fake.reconcileReturnsOnCall == nil { + fake.reconcileReturnsOnCall = make(map[int]struct { + result1 common.Result + result2 error 
+ }) + } + fake.reconcileReturnsOnCall[i] = struct { + result1 common.Result + result2 error + }{result1, result2} +} + +func (fake *CAReconcile) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.reconcileMutex.RLock() + defer fake.reconcileMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *CAReconcile) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} diff --git a/controllers/ibpca/predicate.go b/controllers/ibpca/predicate.go new file mode 100644 index 00000000..e2547667 --- /dev/null +++ b/controllers/ibpca/predicate.go @@ -0,0 +1,154 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package ibpca + +import ( + "reflect" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" +) + +// Update defines a list of elements that we detect spec updates on +type Update struct { + specUpdated bool + caOverridesUpdated bool + tlscaOverridesUpdated bool + restartNeeded bool + caCryptoUpdated bool + caCryptoCreated bool + renewTLSCert bool + imagesUpdated bool + fabricVersionUpdated bool + caTagUpdated bool + // update GetUpdateStackWithTrues when new fields are added +} + +// SpecUpdated returns true if any fields in spec are updated +func (u *Update) SpecUpdated() bool { + return u.specUpdated +} + +// CAOverridesUpdated returns true if ca config overrides updated +func (u *Update) CAOverridesUpdated() bool { + return u.caOverridesUpdated +} + +// TLSCAOverridesUpdated returns true if TLS ca config overrides updated +func (u *Update) TLSCAOverridesUpdated() bool { + return u.tlscaOverridesUpdated +} + +// ConfigOverridesUpdated returns true if either ca or TLS ca overrides updated +func (u *Update) ConfigOverridesUpdated() bool { + return u.caOverridesUpdated || u.tlscaOverridesUpdated +} + +// RestartNeeded returns true if changes in spec require components to restart +func (u *Update) RestartNeeded() bool { + return u.restartNeeded +} + +// CACryptoUpdated returns true if crypto material updated +func (u *Update) CACryptoUpdated() bool { + return u.caCryptoUpdated +} + +// CACryptoCreated returns true if crypto material created +func (u *Update) CACryptoCreated() bool { + return u.caCryptoCreated +} + +// RenewTLSCert returns true if need to renew TLS cert +func (u *Update) RenewTLSCert() bool { + return u.renewTLSCert +} + +// ImagesUpdated returns true if images updated +func (u *Update) ImagesUpdated() bool { + return u.imagesUpdated 
+} + +// FabricVersionUpdated returns true if fabric version updated +func (u *Update) FabricVersionUpdated() bool { + return u.fabricVersionUpdated +} + +func (u *Update) CATagUpdated() bool { + return u.caTagUpdated +} + +// GetUpdateStackWithTrues is a helper method to print updates that have been detected +func (u *Update) GetUpdateStackWithTrues() string { + stack := "" + + if u.specUpdated { + stack += "specUpdated " + } + if u.caOverridesUpdated { + stack += "caOverridesUpdated " + } + if u.tlscaOverridesUpdated { + stack += "tlscaOverridesUpdated " + } + if u.restartNeeded { + stack += "restartNeeded " + } + if u.caCryptoUpdated { + stack += "caCryptoUpdated " + } + if u.caCryptoCreated { + stack += "caCryptoCreated " + } + if u.renewTLSCert { + stack += "renewTLSCert " + } + if u.imagesUpdated { + stack += "imagesUpdated " + } + if u.fabricVersionUpdated { + stack += "fabricVersionUpdated " + } + if u.caTagUpdated { + stack += "caTagUpdated " + } + + if len(stack) == 0 { + stack = "emptystack " + } + + return stack +} + +func imagesUpdated(old, new *current.IBPCA) bool { + if new.Spec.Images != nil { + if old.Spec.Images == nil { + return true + } + + if old.Spec.Images != nil { + return !reflect.DeepEqual(old.Spec.Images, new.Spec.Images) + } + } + + return false +} + +func fabricVersionUpdated(old, new *current.IBPCA) bool { + return old.Spec.FabricVersion != new.Spec.FabricVersion +} diff --git a/controllers/ibpca/predicate_test.go b/controllers/ibpca/predicate_test.go new file mode 100644 index 00000000..e97d0ff6 --- /dev/null +++ b/controllers/ibpca/predicate_test.go @@ -0,0 +1,496 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package ibpca + +import ( + "context" + "fmt" + "sync" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/ca/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + k8sclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + yaml "sigs.k8s.io/yaml" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("predicates", func() { + var ( + reconciler *ReconcileIBPCA + client *mocks.Client + oldCA, newCA *current.IBPCA + ) + + Context("create func predicate", func() { + var ( + e event.CreateEvent + ) + + BeforeEach(func() { + oldCA = ¤t.IBPCA{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ca1", + }, + Spec: current.IBPCASpec{}, + } + + newCA = ¤t.IBPCA{ + ObjectMeta: metav1.ObjectMeta{ + Name: oldCA.GetName(), + }, + Status: current.IBPCAStatus{ + CRStatus: current.CRStatus{ + Type: current.Deployed, + }, + }, + } + + e = event.CreateEvent{ + Object: newCA, + } + + client = &mocks.Client{ + GetStub: func(ctx context.Context, types types.NamespacedName, obj k8sclient.Object) error { + switch obj.(type) { + case *corev1.ConfigMap: + cm := obj.(*corev1.ConfigMap) + bytes, err := yaml.Marshal(oldCA.Spec) + Expect(err).NotTo((HaveOccurred())) + cm.BinaryData = map[string][]byte{ + "spec": bytes, + } + } + + return nil + }, + ListStub: func(ctx context.Context, obj k8sclient.ObjectList, opts ...k8sclient.ListOption) error { + switch obj.(type) { + case *current.IBPCAList: + caList := obj.(*current.IBPCAList) + caList.Items = []current.IBPCA{ + {ObjectMeta: metav1.ObjectMeta{Name: "test-ca1"}}, + {ObjectMeta: metav1.ObjectMeta{Name: "test-ca2"}}, + {ObjectMeta: metav1.ObjectMeta{Name: "test-ca2"}}, + } + case *current.IBPPeerList: + peerList := obj.(*current.IBPPeerList) + peerList.Items = []current.IBPPeer{ + {ObjectMeta: metav1.ObjectMeta{Name: "test-peer"}}, + } + } + return nil + }, + } + + reconciler = &ReconcileIBPCA{ + update: map[string][]Update{}, + client: client, + mutex: &sync.Mutex{}, + } + }) + + It("sets update flags to false if instance has status type and a create event is detected but no spec changes detected", func() { + create := reconciler.CreateFunc(e) + Expect(create).To(Equal(true)) + + Expect(reconciler.GetUpdateStatus(newCA)).To(Equal(&Update{ + specUpdated: false, + caOverridesUpdated: false, + tlscaOverridesUpdated: false, + })) + }) + + It("sets update flags to true if instance has status type and a create event is detected and spec changes detected", func() { + jm, err := util.ConvertToJsonMessage(&v1.ServerConfig{}) + Expect(err).NotTo(HaveOccurred()) + + spec := current.IBPCASpec{ + ImagePullSecrets: []string{"pullsecret1"}, + ConfigOverride: ¤t.ConfigOverride{ + CA: &runtime.RawExtension{Raw: *jm}, + TLSCA: &runtime.RawExtension{Raw: *jm}, + }, + } + binaryData, err := yaml.Marshal(spec) + Expect(err).NotTo(HaveOccurred()) + + client.GetStub = func(ctx context.Context, types types.NamespacedName, obj k8sclient.Object) error { + switch obj.(type) { + case *corev1.ConfigMap: + o := obj.(*corev1.ConfigMap) + o.BinaryData = map[string][]byte{ + "spec": binaryData, + } + } + return nil + } + create := reconciler.CreateFunc(e) + Expect(create).To(Equal(true)) + + Expect(reconciler.GetUpdateStatus(newCA)).To(Equal(&Update{ + specUpdated: true, + caOverridesUpdated: true, + tlscaOverridesUpdated: true, + })) + }) + + It("does not trigger update if instance does not have status type and a create event is detected", func() { + newCA.Status.Type = "" + + create := reconciler.CreateFunc(e) + Expect(create).To(Equal(true)) + + Expect(reconciler.GetUpdateStatus(newCA)).To(Equal(&Update{})) + }) + + It("returns false if new instance's name already exists for another custom resource", func() { + newCA.Status.Type = "" + newCA.Name = "test-peer" + + create := reconciler.CreateFunc(e) + Expect(create).To(Equal(false)) + 
Expect(newCA.Status.Type).To(Equal(current.Error)) + }) + + It("returns false if new instance's name already exists for another IBPCA custom resource", func() { + newCA.Status.Type = "" + newCA.Name = "test-ca2" + + create := reconciler.CreateFunc(e) + Expect(create).To(Equal(false)) + Expect(newCA.Status.Type).To(Equal(current.Error)) + }) + + Context("fabric version", func() { + It("returns no updates when fabric version is not changed", func() { + reconciler.CreateFunc(e) + Expect(reconciler.GetUpdateStatus(newCA)).To(Equal(&Update{})) + }) + + When("fabric version updated", func() { + BeforeEach(func() { + newCA.Spec.FabricVersion = "2.2.1-1" + }) + + It("sets fabric version to true on version change", func() { + reconciler.CreateFunc(e) + Expect(reconciler.GetUpdateStatus(newCA)).To(Equal(&Update{ + specUpdated: true, + fabricVersionUpdated: true, + })) + }) + }) + }) + + Context("images", func() { + It("returns no updates when images are not changed", func() { + reconciler.CreateFunc(e) + Expect(reconciler.GetUpdateStatus(newCA)).To(Equal(&Update{})) + }) + + When("images updated", func() { + BeforeEach(func() { + newCA.Spec.Images = ¤t.CAImages{ + CAImage: "caimage2", + } + }) + + It("sets imagesUpdated to true on image nil to non-nil update", func() { + reconciler.CreateFunc(e) + Expect(reconciler.GetUpdateStatus(newCA)).To(Equal(&Update{ + specUpdated: true, + imagesUpdated: true, + })) + }) + + It("sets imagesUpdated to true on image changes", func() { + oldCA.Spec.Images = ¤t.CAImages{ + CAImage: "caimage1", + } + + reconciler.CreateFunc(e) + Expect(reconciler.GetUpdateStatus(newCA)).To(Equal(&Update{ + specUpdated: true, + imagesUpdated: true, + })) + }) + }) + }) + }) + + Context("update func", func() { + var ( + e event.UpdateEvent + ) + + BeforeEach(func() { + oldCA = ¤t.IBPCA{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ca1", + }, + } + + newCA = ¤t.IBPCA{ + ObjectMeta: metav1.ObjectMeta{ + Name: oldCA.Name, + }, + } + + e = event.UpdateEvent{ + ObjectOld: oldCA, + ObjectNew: newCA, + } + + client = &mocks.Client{} + reconciler = &ReconcileIBPCA{ + update: map[string][]Update{}, + client: client, + mutex: &sync.Mutex{}, + } + }) + + It("returns false if zone being update", func() { + oldCA.Spec.Zone = "old_zone" + newCA.Spec.Zone = "new_zone" + Expect(reconciler.UpdateFunc(e)).To(Equal(false)) + }) + + It("returns false if region being update", func() { + oldCA.Spec.Region = "old_region" + newCA.Spec.Region = "new_region" + Expect(reconciler.UpdateFunc(e)).To(Equal(false)) + }) + + It("returns false old and new objects are equal", func() { + Expect(reconciler.UpdateFunc(e)).To(Equal(false)) + }) + + It("returns true if spec updated", func() { + newCA.Spec.ImagePullSecrets = []string{"secret1"} + Expect(reconciler.UpdateFunc(e)).To(Equal(true)) + Expect(reconciler.GetUpdateStatus(newCA).SpecUpdated()).To(Equal(true)) + }) + + It("returns true if ca overrides created for the first time", func() { + newCA.Spec.ConfigOverride = ¤t.ConfigOverride{} + Expect(reconciler.UpdateFunc(e)).To(Equal(true)) + Expect(reconciler.GetUpdateStatus(newCA).CAOverridesUpdated()).To(Equal(true)) + Expect(reconciler.GetUpdateStatus(newCA).TLSCAOverridesUpdated()).To(Equal(true)) + }) + + It("returns true if enrollment ca overrides updated", func() { + oldCA.Spec.ConfigOverride = ¤t.ConfigOverride{} + newCA.Spec.ConfigOverride = ¤t.ConfigOverride{ + CA: &runtime.RawExtension{}, + } + + Expect(reconciler.UpdateFunc(e)).To(Equal(true)) + 
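+			// Only the enrollment CA override flag should be raised here; the TLS CA
+			// override was not touched, so its flag must remain false.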
Expect(reconciler.GetUpdateStatus(newCA).CAOverridesUpdated()).To(Equal(true)) + Expect(reconciler.GetUpdateStatus(newCA).TLSCAOverridesUpdated()).To(Equal(false)) + }) + + Context("ca crypto", func() { + var ( + oldSecret *corev1.Secret + newSecret *corev1.Secret + ) + + BeforeEach(func() { + oldSecret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-ca-crypto", newCA.Name), + OwnerReferences: []metav1.OwnerReference{ + { + Name: newCA.Name, + Kind: "IBPCA", + }, + }, + }, + } + newSecret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-ca-crypto", newCA.Name), + OwnerReferences: []metav1.OwnerReference{ + { + Name: newCA.Name, + Kind: "IBPCA", + }, + }, + }, + } + e = event.UpdateEvent{ + ObjectOld: oldSecret, + ObjectNew: newSecret, + } + }) + + It("returns false if secret data not changed between old and new secret", func() { + oldSecret.Data = map[string][]byte{ + "tls-cert.pem": []byte("cert"), + } + newSecret.Data = map[string][]byte{ + "tls-cert.pem": []byte("cert"), + } + Expect(reconciler.UpdateFunc(e)).To(Equal(false)) + }) + + It("returns true if secret data changed between old and new secret", func() { + oldSecret.Data = map[string][]byte{ + "tls-cert.pem": []byte("cert"), + } + newSecret.Data = map[string][]byte{ + "tls-cert.pem": []byte("newcert"), + } + Expect(reconciler.UpdateFunc(e)).To(Equal(true)) + Expect(reconciler.GetUpdateStatus(newCA).CACryptoUpdated()).To(Equal(true)) + }) + + It("returns false if anything other than secret data changed between old and new secret", func() { + oldSecret.APIVersion = "v1" + newSecret.APIVersion = "v2" + Expect(reconciler.UpdateFunc(e)).To(Equal(false)) + }) + }) + + It("returns true if tls ca overrides updated", func() { + caConfig := &v1.ServerConfig{ + CAConfig: v1.CAConfig{ + CA: v1.CAInfo{ + Name: "ca", + }, + }, + } + + caJson, err := util.ConvertToJsonMessage(caConfig) + Expect(err).NotTo(HaveOccurred()) + + oldCA.Spec.ConfigOverride = ¤t.ConfigOverride{} + newCA.Spec.ConfigOverride = ¤t.ConfigOverride{ + TLSCA: &runtime.RawExtension{Raw: *caJson}, + } + + Expect(reconciler.UpdateFunc(e)).To(Equal(true)) + Expect(reconciler.GetUpdateStatus(newCA).CAOverridesUpdated()).To(Equal(false)) + Expect(reconciler.GetUpdateStatus(newCA).TLSCAOverridesUpdated()).To(Equal(true)) + + }) + + Context("remove element", func() { + BeforeEach(func() { + reconciler.PushUpdate(newCA.Name, Update{ + caOverridesUpdated: true, + }) + + reconciler.PushUpdate(newCA.Name, Update{ + tlscaOverridesUpdated: true, + }) + + Expect(reconciler.GetUpdateStatus(newCA).CAOverridesUpdated()).To(Equal(true)) + Expect(reconciler.GetUpdateStatusAtElement(newCA, 1).TLSCAOverridesUpdated()).To(Equal(true)) + }) + + It("removes top element", func() { + reconciler.PopUpdate(newCA.Name) + Expect(reconciler.GetUpdateStatus(newCA).CAOverridesUpdated()).To(Equal(false)) + Expect(reconciler.GetUpdateStatus(newCA).TLSCAOverridesUpdated()).To(Equal(true)) + }) + + It("removing more elements than in slice should not panic", func() { + reconciler.PopUpdate(newCA.Name) + reconciler.PopUpdate(newCA.Name) + reconciler.PopUpdate(newCA.Name) + Expect(reconciler.GetUpdateStatus(newCA).SpecUpdated()).To(Equal(false)) + Expect(reconciler.GetUpdateStatus(newCA).CAOverridesUpdated()).To(Equal(false)) + Expect(reconciler.GetUpdateStatus(newCA).TLSCAOverridesUpdated()).To(Equal(false)) + }) + }) + + Context("push update", func() { + It("pushes update only if missing for certificate update", func() { + reconciler.PushUpdate(newCA.Name, 
Update{specUpdated: true}) + Expect(len(reconciler.update[newCA.Name])).To(Equal(1)) + reconciler.PushUpdate(newCA.Name, Update{caOverridesUpdated: true}) + Expect(len(reconciler.update[newCA.Name])).To(Equal(2)) + reconciler.PushUpdate(newCA.Name, Update{tlscaOverridesUpdated: true}) + Expect(len(reconciler.update[newCA.Name])).To(Equal(3)) + reconciler.PushUpdate(newCA.Name, Update{tlscaOverridesUpdated: true}) + Expect(len(reconciler.update[newCA.Name])).To(Equal(3)) + reconciler.PushUpdate(newCA.Name, Update{restartNeeded: true, specUpdated: true}) + Expect(len(reconciler.update[newCA.Name])).To(Equal(4)) + }) + }) + + Context("fabric version", func() { + It("returns no updates when fabric version is not changed", func() { + reconciler.UpdateFunc(e) + Expect(reconciler.GetUpdateStatus(newCA)).To(Equal(&Update{})) + }) + + When("fabric version updated", func() { + BeforeEach(func() { + newCA.Spec.FabricVersion = "2.2.1-1" + }) + + It("sets fabric version to true on version change", func() { + reconciler.UpdateFunc(e) + Expect(reconciler.GetUpdateStatus(newCA)).To(Equal(&Update{ + specUpdated: true, + fabricVersionUpdated: true, + })) + }) + }) + }) + + Context("images", func() { + It("returns no updates when images are not changed", func() { + reconciler.UpdateFunc(e) + Expect(reconciler.GetUpdateStatus(newCA)).To(Equal(&Update{})) + }) + + When("images updated", func() { + BeforeEach(func() { + newCA.Spec.Images = ¤t.CAImages{ + CAImage: "caimage2", + } + + oldCA.Spec.Images = ¤t.CAImages{ + CAImage: "caimage1", + } + }) + + It("sets imagesUpdated to true on image changes", func() { + reconciler.UpdateFunc(e) + Expect(reconciler.GetUpdateStatus(newCA)).To(Equal(&Update{ + specUpdated: true, + imagesUpdated: true, + })) + }) + }) + }) + }) +}) diff --git a/controllers/ibpconsole/ibpconsole_controller.go b/controllers/ibpconsole/ibpconsole_controller.go new file mode 100644 index 00000000..a2252c39 --- /dev/null +++ b/controllers/ibpconsole/ibpconsole_controller.go @@ -0,0 +1,636 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package ibpconsole
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"reflect"
+	"time"
+
+	current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1"
+	commoncontroller "github.com/IBM-Blockchain/fabric-operator/controllers/common"
+	config "github.com/IBM-Blockchain/fabric-operator/operatorconfig"
+	"github.com/IBM-Blockchain/fabric-operator/pkg/global"
+	"github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient"
+	k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient"
+	"github.com/IBM-Blockchain/fabric-operator/pkg/offering"
+	baseconsole "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/console"
+	"github.com/IBM-Blockchain/fabric-operator/pkg/offering/common"
+	k8sconsole "github.com/IBM-Blockchain/fabric-operator/pkg/offering/k8s/console"
+	openshiftconsole "github.com/IBM-Blockchain/fabric-operator/pkg/offering/openshift/console"
+	"github.com/IBM-Blockchain/fabric-operator/pkg/operatorerrors"
+	"github.com/IBM-Blockchain/fabric-operator/pkg/util"
+	"github.com/pkg/errors"
+	"gopkg.in/yaml.v2"
+
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	k8serrors "k8s.io/apimachinery/pkg/api/errors"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller"
+	"sigs.k8s.io/controller-runtime/pkg/event"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+	logf "sigs.k8s.io/controller-runtime/pkg/log"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+	"sigs.k8s.io/controller-runtime/pkg/predicate"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+	"sigs.k8s.io/controller-runtime/pkg/source"
+)
+
+var log = logf.Log.WithName("controller_ibpconsole")
+
+// Add creates a new IBPConsole Controller and adds it to the Manager. The Manager will set fields on the Controller
+// and Start it when the Manager is Started.
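+//
+// Internally, Add builds a reconciler for the configured offering (Kubernetes or
+// OpenShift) and registers watches on IBPConsole resources and on the Deployments
+// they own, so changes to either enqueue a reconcile request.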
+func Add(mgr manager.Manager, config *config.Config) error {
+	r, err := newReconciler(mgr, config)
+	if err != nil {
+		return err
+	}
+	return add(mgr, r)
+}
+
+// newReconciler returns a new reconcile.Reconciler
+func newReconciler(mgr manager.Manager, cfg *config.Config) (*ReconcileIBPConsole, error) {
+	client := k8sclient.New(mgr.GetClient(), &global.ConfigSetter{Config: cfg.Operator.Globals})
+	scheme := mgr.GetScheme()
+
+	ibpconsole := &ReconcileIBPConsole{
+		client: client,
+		scheme: scheme,
+		Config: cfg,
+	}
+
+	switch cfg.Offering {
+	case offering.K8S:
+		ibpconsole.Offering = k8sconsole.New(client, scheme, cfg)
+	case offering.OPENSHIFT:
+		ibpconsole.Offering = openshiftconsole.New(client, scheme, cfg)
+	}
+
+	return ibpconsole, nil
+}
+
+// add adds a new Controller to mgr with r as the reconcile.Reconciler
+func add(mgr manager.Manager, r *ReconcileIBPConsole) error {
+	// Create a new controller
+	predicateFuncs := predicate.Funcs{
+		CreateFunc: r.CreateFunc,
+		UpdateFunc: r.UpdateFunc,
+	}
+
+	c, err := controller.New("ibpconsole-controller", mgr, controller.Options{Reconciler: r})
+	if err != nil {
+		return err
+	}
+
+	// Watch for changes to primary resource IBPConsole
+	err = c.Watch(&source.Kind{Type: &current.IBPConsole{}}, &handler.EnqueueRequestForObject{}, predicateFuncs)
+	if err != nil {
+		return err
+	}
+
+	// Watch for changes to secondary resource Deployments and requeue the owner IBPConsole
+	err = c.Watch(&source.Kind{Type: &appsv1.Deployment{}}, &handler.EnqueueRequestForOwner{
+		IsController: true,
+		OwnerType:    &current.IBPConsole{},
+	})
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+var _ reconcile.Reconciler = &ReconcileIBPConsole{}
+
+//go:generate counterfeiter -o mocks/consolereconcile.go -fake-name ConsoleReconcile . consoleReconcile
+
+type consoleReconcile interface {
+	Reconcile(*current.IBPConsole, baseconsole.Update) (common.Result, error)
+}
+
+// ReconcileIBPConsole reconciles an IBPConsole object
+type ReconcileIBPConsole struct {
+	// This client, initialized using mgr.GetClient() above, is a split client
+	// that reads objects from the cache and writes to the apiserver
+	client k8sclient.Client
+	scheme *runtime.Scheme
+
+	Offering consoleReconcile
+	Config   *config.Config
+
+	update Update
+}
+
+// Reconcile reads the state of the cluster for an IBPConsole object and makes changes based on the state read
+// and what is in the IBPConsole.Spec
+// Note:
+// The Controller will requeue the Request to be processed again if the returned error is non-nil or
+// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
+func (r *ReconcileIBPConsole) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
+	var err error
+
+	reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
+	reqLogger.Info(fmt.Sprintf("Reconciling IBPConsole with update values of [ %+v ]", r.update.GetUpdateStackWithTrues()))
+
+	// Fetch the IBPConsole instance
+	instance := &current.IBPConsole{}
+	err = r.client.Get(context.TODO(), request.NamespacedName, instance)
+	if err != nil {
+		if k8serrors.IsNotFound(err) {
+			// Request object not found, could have been deleted after reconcile request.
+			// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
+			// Return and don't requeue
			return reconcile.Result{}, nil
+		}
+		// Error reading the object - requeue the request.
+ return reconcile.Result{}, err + } + + result, err := r.Offering.Reconcile(instance, &r.update) + setStatusErr := r.SetStatus(instance, err) + if setStatusErr != nil { + return reconcile.Result{}, operatorerrors.IsBreakingError(setStatusErr, "failed to update status", log) + } + + if err != nil { + return reconcile.Result{}, operatorerrors.IsBreakingError(errors.Wrapf(err, "Console instance '%s' encountered error", instance.GetName()), "stopping reconcile loop", log) + } + + reqLogger.Info(fmt.Sprintf("Finished reconciling IBPConsole '%s' with update values of [ %+v ]", instance.GetName(), r.update.GetUpdateStackWithTrues())) + return result.Result, nil +} + +func (r *ReconcileIBPConsole) SetStatus(instance *current.IBPConsole, reconcileErr error) error { + err := r.SaveSpecState(instance) + if err != nil { + return errors.Wrap(err, "failed to save spec state") + } + + err = r.client.Get(context.TODO(), types.NamespacedName{Name: instance.GetName(), Namespace: instance.GetNamespace()}, instance) + if err != nil { + return err + } + + status := instance.Status.CRStatus + + if reconcileErr != nil { + status.Type = current.Error + status.Status = current.True + status.Reason = "errorOccurredDuringReconcile" + status.Message = reconcileErr.Error() + status.LastHeartbeatTime = time.Now().String() + status.ErrorCode = operatorerrors.GetErrorCode(reconcileErr) + + instance.Status = current.IBPConsoleStatus{ + CRStatus: status, + } + + log.Info(fmt.Sprintf("Updating status of IBPConsole custom resource to %s phase", instance.Status.Type)) + err := r.client.PatchStatus(context.TODO(), instance, nil, k8sclient.PatchOption{ + Resilient: &k8sclient.ResilientPatch{ + Retry: 2, + Into: ¤t.IBPConsole{}, + Strategy: client.MergeFrom, + }, + }) + if err != nil { + return err + } + + return nil + } + + status.Versions.Reconciled = instance.Spec.Version + + running, err := r.GetPodStatus(instance) + if err != nil { + return err + } + + if running { + if instance.Status.Type == current.Deployed { + return nil + } + status.Type = current.Deployed + status.Status = current.True + status.Reason = "allPodsRunning" + } else { + if instance.Status.Type == current.Deploying { + return nil + } + status.Type = current.Deploying + status.Status = current.True + status.Reason = "waitingForPods" + } + + instance.Status = current.IBPConsoleStatus{ + CRStatus: status, + } + instance.Status.LastHeartbeatTime = time.Now().String() + log.Info(fmt.Sprintf("Updating status of IBPConsole custom resource to %s phase", instance.Status.Type)) + err = r.client.PatchStatus(context.TODO(), instance, nil, k8sclient.PatchOption{ + Resilient: &k8sclient.ResilientPatch{ + Retry: 2, + Into: ¤t.IBPConsole{}, + Strategy: client.MergeFrom, + }, + }) + if err != nil { + return err + } + + return nil +} + +func (r *ReconcileIBPConsole) GetPodStatus(instance *current.IBPConsole) (bool, error) { + labelSelector, err := labels.Parse(fmt.Sprintf("app=%s", instance.Name)) + if err != nil { + return false, errors.Wrap(err, "failed to parse label selector for app name") + } + + listOptions := &client.ListOptions{ + LabelSelector: labelSelector, + Namespace: instance.Namespace, + } + + podList := &corev1.PodList{} + err = r.client.List(context.TODO(), podList, listOptions) + if err != nil { + return false, err + } + + for _, pod := range podList.Items { + if pod.Status.Phase != corev1.PodRunning { + return false, nil + } + } + + return true, nil +} + +func (r *ReconcileIBPConsole) getIgnoreDiffs() []string { + return []string{ + 
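+		// As in the CA controller, differences on this Kubernetes-normalized field path
+		// are ignored when comparing the deployed resource against its expected spec.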
`Template\.Spec\.Containers\.slice\[\d\]\.Resources\.Requests\.map\[memory\].s`, + } +} + +func (r *ReconcileIBPConsole) getLabels(instance v1.Object) map[string]string { + label := os.Getenv("OPERATOR_LABEL_PREFIX") + if label == "" { + label = "fabric" + } + + return map[string]string{ + "app": instance.GetName(), + "creator": label, + "release": "operator", + "helm.sh/chart": "ibm-" + label, + "app.kubernetes.io/name": label, + "app.kubernetes.io/instance": label + "console", + "app.kubernetes.io/managed-by": label + "-operator", + } +} + +func (r *ReconcileIBPConsole) getSelectorLabels(instance v1.Object) map[string]string { + return map[string]string{ + "app": instance.GetName(), + } +} + +func (r *ReconcileIBPConsole) CreateFunc(e event.CreateEvent) bool { + r.update = Update{} + + console := e.Object.(*current.IBPConsole) + if console.Status.HasType() { + cm, err := r.GetSpecState(console) + if err != nil { + log.Info(fmt.Sprintf("Failed getting saved console spec '%s', can't perform update checks, triggering reconcile: %s", console.GetName(), err.Error())) + return true + } + + specBytes := cm.BinaryData["spec"] + savedConsole := ¤t.IBPConsole{} + + err = yaml.Unmarshal(specBytes, &savedConsole.Spec) + if err != nil { + log.Info(fmt.Sprintf("Unmarshal failed for saved console spec '%s', can't perform update checks, triggering reconcile: %s", console.GetName(), err.Error())) + return true + } + + if !reflect.DeepEqual(console.Spec, savedConsole.Spec) { + log.Info(fmt.Sprintf("IBPConsole '%s' spec was updated while operator was down, triggering reconcile", console.GetName())) + r.update.specUpdated = true + + if r.DeployerCMUpdated(console.Spec, savedConsole.Spec) { + r.update.deployerCMUpdated = true + } + if r.ConsoleCMUpdated(console.Spec, savedConsole.Spec) { + r.update.consoleCMUpdated = true + } + if r.EnvCMUpdated(console.Spec, savedConsole.Spec) { + r.update.envCMUpdated = true + } + + return true + } + + // Don't trigger reconcile if spec was not updated during operator restart + return false + } + + // If creating resource for the first time, check that a unique name is provided + err := commoncontroller.ValidateCRName(r.client, console.Name, console.Namespace, commoncontroller.IBPCONSOLE) + if err != nil { + log.Error(err, "failed to validate console name") + operror := operatorerrors.Wrap(err, operatorerrors.InvalidCustomResourceCreateRequest, "failed to validate ibpconsole name") + err = r.SetStatus(console, operror) + if err != nil { + log.Error(err, "failed to set status to error", "console.name", console.Name, "error", "InvalidCustomResourceCreateRequest") + } + + return false + } + + return true +} + +func (r *ReconcileIBPConsole) UpdateFunc(e event.UpdateEvent) bool { + r.update = Update{} + + oldConsole := e.ObjectOld.(*current.IBPConsole) + newConsole := e.ObjectNew.(*current.IBPConsole) + + if util.CheckIfZoneOrRegionUpdated(oldConsole.Spec.Zone, newConsole.Spec.Zone) { + log.Error(errors.New("Zone update is not allowed"), "invalid spec update") + return false + } + + if util.CheckIfZoneOrRegionUpdated(oldConsole.Spec.Region, newConsole.Spec.Region) { + log.Error(errors.New("Region update is not allowed"), "invalid spec update") + return false + } + + if reflect.DeepEqual(oldConsole.Spec, newConsole.Spec) { + return false + } + + log.Info(fmt.Sprintf("Spec update detected on IBPConsole custom resource: %s", oldConsole.Name)) + r.update.specUpdated = true + + if newConsole.Spec.Action.Restart == true { + r.update.restartNeeded = true + } + + return true +} + +func 
(r *ReconcileIBPConsole) SaveSpecState(instance *current.IBPConsole) error { + data, err := yaml.Marshal(instance.Spec) + if err != nil { + return err + } + + cm := &corev1.ConfigMap{ + ObjectMeta: v1.ObjectMeta{ + Name: fmt.Sprintf("%s-spec", instance.GetName()), + Namespace: instance.GetNamespace(), + Labels: instance.GetLabels(), + }, + BinaryData: map[string][]byte{ + "spec": data, + }, + } + + err = r.client.CreateOrUpdate(context.TODO(), cm, controllerclient.CreateOrUpdateOption{Owner: instance, Scheme: r.scheme}) + if err != nil { + return err + } + + return nil +} + +func (r *ReconcileIBPConsole) GetSpecState(instance *current.IBPConsole) (*corev1.ConfigMap, error) { + cm := &corev1.ConfigMap{} + nn := types.NamespacedName{ + Name: fmt.Sprintf("%s-spec", instance.GetName()), + Namespace: instance.GetNamespace(), + } + + err := r.client.Get(context.TODO(), nn, cm) + if err != nil { + return nil, err + } + + return cm, nil +} + +func (r *ReconcileIBPConsole) DeployerCMUpdated(old, new current.IBPConsoleSpec) bool { + if !reflect.DeepEqual(old.ImagePullSecrets, new.ImagePullSecrets) { + return true + } + if !reflect.DeepEqual(old.Deployer, new.Deployer) { + return true + } + if old.NetworkInfo.Domain != new.NetworkInfo.Domain { + return true + } + if old.RegistryURL != new.RegistryURL { + return true + } + if !reflect.DeepEqual(old.Arch, new.Arch) { + return true + } + if !reflect.DeepEqual(old.Versions, new.Versions) { + return true + } + // Uncomment if MustGather changes are ported into release 2.5.2 + // if old.Images.MustgatherImage != new.Images.MustgatherImage { + // return true + // } + // if old.Images.MustgatherTag != new.Images.MustgatherTag { + // return true + // } + if !reflect.DeepEqual(old.Storage, new.Storage) { + return true + } + if !reflect.DeepEqual(old.CRN, new.CRN) { + return true + } + + oldOverrides, err := old.GetOverridesDeployer() + if err != nil { + return false + } + newOverrides, err := new.GetOverridesDeployer() + if err != nil { + return false + } + if !reflect.DeepEqual(oldOverrides, newOverrides) { + return true + } + + return false +} + +func (r *ReconcileIBPConsole) ConsoleCMUpdated(old, new current.IBPConsoleSpec) bool { + if !reflect.DeepEqual(old.IBMID, new.IBMID) { + return true + } + if old.IAMApiKey != new.IAMApiKey { + return true + } + if old.SegmentWriteKey != new.SegmentWriteKey { + return true + } + if old.Email != new.Email { + return true + } + if old.AuthScheme != new.AuthScheme { + return true + } + if old.ConfigtxlatorURL != new.ConfigtxlatorURL { + return true + } + if old.DeployerURL != new.DeployerURL { + return true + } + if old.DeployerTimeout != new.DeployerTimeout { + return true + } + if old.Components != new.Components { + return true + } + if old.Sessions != new.Sessions { + return true + } + if old.System != new.System { + return true + } + if old.SystemChannel != new.SystemChannel { + return true + } + if !reflect.DeepEqual(old.Proxying, new.Proxying) { + return true + } + if !reflect.DeepEqual(old.FeatureFlags, new.FeatureFlags) { + return true + } + if !reflect.DeepEqual(old.ClusterData, new.ClusterData) { + return true + } + if !reflect.DeepEqual(old.CRN, new.CRN) { + return true + } + + oldOverrides, err := old.GetOverridesConsole() + if err != nil { + return false + } + newOverrides, err := new.GetOverridesConsole() + if err != nil { + return false + } + if !reflect.DeepEqual(oldOverrides, newOverrides) { + return true + } + + return false +} + +func (r *ReconcileIBPConsole) EnvCMUpdated(old, new 
current.IBPConsoleSpec) bool { + if old.ConnectionString != new.ConnectionString { + return true + } + if old.System != new.System { + return true + } + if old.TLSSecretName != new.TLSSecretName { + return true + } + + return false +} + +func (r *ReconcileIBPConsole) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(¤t.IBPConsole{}). + Complete(r) +} + +type Update struct { + specUpdated bool + restartNeeded bool + deployerCMUpdated bool + consoleCMUpdated bool + envCMUpdated bool +} + +func (u *Update) SpecUpdated() bool { + return u.specUpdated +} + +func (u *Update) RestartNeeded() bool { + return u.restartNeeded +} + +func (u *Update) DeployerCMUpdated() bool { + return u.deployerCMUpdated +} + +func (u *Update) ConsoleCMUpdated() bool { + return u.consoleCMUpdated +} + +func (u *Update) EnvCMUpdated() bool { + return u.envCMUpdated +} + +func (u *Update) GetUpdateStackWithTrues() string { + stack := "" + + if u.specUpdated { + stack += "specUpdated " + } + if u.restartNeeded { + stack += "restartNeeded " + } + if u.deployerCMUpdated { + stack += "deployerCMUpdated " + } + if u.consoleCMUpdated { + stack += "consoleCMUpdated " + } + if u.envCMUpdated { + stack += "envCMUpdated " + } + + if len(stack) == 0 { + stack = "emptystack " + } + + return stack +} diff --git a/controllers/ibpconsole/ibpconsole_controller_test.go b/controllers/ibpconsole/ibpconsole_controller_test.go new file mode 100644 index 00000000..bf7b2317 --- /dev/null +++ b/controllers/ibpconsole/ibpconsole_controller_test.go @@ -0,0 +1,234 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package ibpconsole + +import ( + "context" + "fmt" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/pkg/errors" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + consolemocks "github.com/IBM-Blockchain/fabric-operator/controllers/ibpconsole/mocks" + "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/operatorerrors" + corev1 "k8s.io/api/core/v1" + k8serror "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var _ = Describe("ReconcileIBPConsole", func() { + const ( + testRoleBindingFile = "../../../definitions/console/rolebinding.yaml" + testServiceAccountFile = "../../../definitions/console/serviceaccount.yaml" + ) + + var ( + reconciler *ReconcileIBPConsole + request reconcile.Request + mockKubeClient *mocks.Client + mockConsoleReconcile *consolemocks.ConsoleReconcile + instance *current.IBPConsole + ) + + BeforeEach(func() { + mockKubeClient = &mocks.Client{} + mockConsoleReconcile = &consolemocks.ConsoleReconcile{} + instance = ¤t.IBPConsole{ + Spec: current.IBPConsoleSpec{}, + } + instance.Name = "test-console" + instance.Namespace = "test-namespace" + + mockKubeClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj client.Object) error { + switch obj.(type) { + case *current.IBPConsole: + o := obj.(*current.IBPConsole) + o.Kind = "IBPConsole" + o.Spec = instance.Spec + o.Name = instance.Name + + instance.Status = o.Status + } + return nil + } + + mockKubeClient.UpdateStatusStub = func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + switch obj.(type) { + case *current.IBPConsole: + o := obj.(*current.IBPConsole) + instance.Status = o.Status + } + return nil + } + + mockKubeClient.ListStub = func(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) error { + switch obj.(type) { + case *corev1.NodeList: + nodeList := obj.(*corev1.NodeList) + node := corev1.Node{} + node.Labels = map[string]string{} + node.Labels["topology.kubernetes.io/zone"] = "dal" + node.Labels["topology.kubernetes.io/region"] = "us-south" + nodeList.Items = append(nodeList.Items, node) + case *current.IBPConsoleList: + list := obj.(*current.IBPConsoleList) + console1 := current.IBPConsole{} + console1.Name = "test-console1" + console2 := current.IBPConsole{} + console2.Name = "test-console1" + list.Items = []current.IBPConsole{console1, console2} + case *current.IBPPeerList: + caList := obj.(*current.IBPPeerList) + p1 := current.IBPPeer{} + p1.Name = "test-peer" + caList.Items = []current.IBPPeer{p1} + } + return nil + } + + reconciler = &ReconcileIBPConsole{ + Config: &config.Config{}, + Offering: mockConsoleReconcile, + client: mockKubeClient, + scheme: &runtime.Scheme{}, + } + request = reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: "test-namespace", + Name: "test", + }, + } + }) + + Context("Reconciles", func() { + It("does not return an error if the custom resource is 'not fonund'", func() { + notFoundErr := &k8serror.StatusError{ + ErrStatus: metav1.Status{ + Reason: metav1.StatusReasonNotFound, + }, + } + mockKubeClient.GetReturns(notFoundErr) + _, err := reconciler.Reconcile(context.TODO(), request) + 
Expect(err).NotTo(HaveOccurred()) + }) + + It("returns an error if the request to get custom resource return any other errors besides 'not found'", func() { + alreadyExistsErr := &k8serror.StatusError{ + ErrStatus: metav1.Status{ + Message: "already exists", + Reason: metav1.StatusReasonAlreadyExists, + }, + } + mockKubeClient.GetReturns(alreadyExistsErr) + _, err := reconciler.Reconcile(context.TODO(), request) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("already exists")) + }) + + It("returns an error if it encountered a non-breaking error", func() { + errMsg := "failed to reconcile deployment encountered breaking error" + mockConsoleReconcile.ReconcileReturns(common.Result{}, errors.New(errMsg)) + _, err := reconciler.Reconcile(context.TODO(), request) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(fmt.Sprintf("Console instance '%s' encountered error: %s", instance.Name, errMsg))) + }) + + It("does not return an error if it encountered a breaking error", func() { + mockConsoleReconcile.ReconcileReturns(common.Result{}, operatorerrors.New(operatorerrors.InvalidDeploymentCreateRequest, "failed to reconcile deployment encountered breaking error")) + _, err := reconciler.Reconcile(context.TODO(), request) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("set status", func() { + It("returns an error if the custom resource is not found", func() { + notFoundErr := &k8serror.StatusError{ + ErrStatus: metav1.Status{ + Reason: metav1.StatusReasonNotFound, + }, + } + mockKubeClient.GetReturns(notFoundErr) + err := reconciler.SetStatus(instance, notFoundErr) + Expect(err).To(HaveOccurred()) + }) + + It("sets the status to error if error occured during IBPConsole reconciliation", func() { + reconciler.SetStatus(instance, errors.New("ibpconsole error")) + Expect(instance.Status.Type).To(Equal(current.Error)) + Expect(instance.Status.Message).To(Equal("ibpconsole error")) + }) + + It("sets the status to deploying if pod is not yet running", func() { + mockKubeClient.ListStub = func(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) error { + podList := obj.(*corev1.PodList) + pod := corev1.Pod{} + podList.Items = append(podList.Items, pod) + return nil + } + reconciler.SetStatus(instance, nil) + Expect(instance.Status.Type).To(Equal(current.Deploying)) + }) + + It("sets the status to deployed if pod is running", func() { + reconciler.SetStatus(instance, nil) + Expect(instance.Status.Type).To(Equal(current.Deployed)) + }) + }) + + Context("create func predicate", func() { + var ( + e event.CreateEvent + ) + + BeforeEach(func() { + e = event.CreateEvent{ + Object: instance, + } + }) + + It("returns false if new console's name already exists for another IBPConsole", func() { + instance.Name = "test-console1" + create := reconciler.CreateFunc(e) + Expect(create).To(Equal(false)) + Expect(instance.Status.Type).To(Equal(current.Error)) + }) + + It("returns false if new console's name already exists for another custom resource", func() { + instance.Name = "test-peer" + create := reconciler.CreateFunc(e) + Expect(create).To(Equal(false)) + Expect(instance.Status.Type).To(Equal(current.Error)) + }) + + It("returns true if new console with valid name created", func() { + create := reconciler.CreateFunc(e) + Expect(create).To(Equal(true)) + }) + }) +}) diff --git a/controllers/ibpconsole/ibpconsole_suite_test.go b/controllers/ibpconsole/ibpconsole_suite_test.go new file mode 100644 index 00000000..6b6eb0d8 --- /dev/null +++ 
b/controllers/ibpconsole/ibpconsole_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package ibpconsole_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestIbpconsole(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Ibpconsole Suite") +} diff --git a/controllers/ibpconsole/mocks/consolereconcile.go b/controllers/ibpconsole/mocks/consolereconcile.go new file mode 100644 index 00000000..050a92fa --- /dev/null +++ b/controllers/ibpconsole/mocks/consolereconcile.go @@ -0,0 +1,118 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package mocks + +import ( + "sync" + + "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + baseconsole "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/console" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common" +) + +type ConsoleReconcile struct { + ReconcileStub func(*v1beta1.IBPConsole, baseconsole.Update) (common.Result, error) + reconcileMutex sync.RWMutex + reconcileArgsForCall []struct { + arg1 *v1beta1.IBPConsole + arg2 baseconsole.Update + } + reconcileReturns struct { + result1 common.Result + result2 error + } + reconcileReturnsOnCall map[int]struct { + result1 common.Result + result2 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *ConsoleReconcile) Reconcile(arg1 *v1beta1.IBPConsole, arg2 baseconsole.Update) (common.Result, error) { + fake.reconcileMutex.Lock() + ret, specificReturn := fake.reconcileReturnsOnCall[len(fake.reconcileArgsForCall)] + fake.reconcileArgsForCall = append(fake.reconcileArgsForCall, struct { + arg1 *v1beta1.IBPConsole + arg2 baseconsole.Update + }{arg1, arg2}) + stub := fake.ReconcileStub + fakeReturns := fake.reconcileReturns + fake.recordInvocation("Reconcile", []interface{}{arg1, arg2}) + fake.reconcileMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *ConsoleReconcile) ReconcileCallCount() int { + fake.reconcileMutex.RLock() + defer fake.reconcileMutex.RUnlock() + return len(fake.reconcileArgsForCall) +} + +func (fake *ConsoleReconcile) ReconcileCalls(stub func(*v1beta1.IBPConsole, baseconsole.Update) (common.Result, error)) { + fake.reconcileMutex.Lock() + defer fake.reconcileMutex.Unlock() + fake.ReconcileStub = stub +} + +func (fake *ConsoleReconcile) ReconcileArgsForCall(i int) (*v1beta1.IBPConsole, baseconsole.Update) { + fake.reconcileMutex.RLock() + defer fake.reconcileMutex.RUnlock() + argsForCall := fake.reconcileArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *ConsoleReconcile) ReconcileReturns(result1 common.Result, result2 error) { + fake.reconcileMutex.Lock() + defer fake.reconcileMutex.Unlock() + fake.ReconcileStub = nil + 
fake.reconcileReturns = struct { + result1 common.Result + result2 error + }{result1, result2} +} + +func (fake *ConsoleReconcile) ReconcileReturnsOnCall(i int, result1 common.Result, result2 error) { + fake.reconcileMutex.Lock() + defer fake.reconcileMutex.Unlock() + fake.ReconcileStub = nil + if fake.reconcileReturnsOnCall == nil { + fake.reconcileReturnsOnCall = make(map[int]struct { + result1 common.Result + result2 error + }) + } + fake.reconcileReturnsOnCall[i] = struct { + result1 common.Result + result2 error + }{result1, result2} +} + +func (fake *ConsoleReconcile) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.reconcileMutex.RLock() + defer fake.reconcileMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *ConsoleReconcile) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} diff --git a/controllers/ibporderer/ibporderer_controller.go b/controllers/ibporderer/ibporderer_controller.go new file mode 100644 index 00000000..9f7ba80c --- /dev/null +++ b/controllers/ibporderer/ibporderer_controller.go @@ -0,0 +1,1118 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package ibporderer + +import ( + "context" + "encoding/json" + "fmt" + "reflect" + "strings" + "sync" + "time" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + commoncontroller "github.com/IBM-Blockchain/fabric-operator/controllers/common" + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + "github.com/IBM-Blockchain/fabric-operator/pkg/global" + orderer "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer/config/v1" + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering" + baseorderer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common" + k8sorderer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/k8s/orderer" + openshiftorderer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/openshift/orderer" + "github.com/IBM-Blockchain/fabric-operator/pkg/operatorerrors" + "github.com/IBM-Blockchain/fabric-operator/pkg/restart/staggerrestarts" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + "github.com/IBM-Blockchain/fabric-operator/version" + "github.com/pkg/errors" + yaml "sigs.k8s.io/yaml" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +const ( + KIND = "IBPOrderer" +) + +var log = logf.Log.WithName("controller_ibporderer") + +// Add creates a new IBPOrderer Controller and adds it to the Manager. The Manager will set fields on the Controller +// and Start it when the Manager is Started. 
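// Editor's note (illustrative sketch, not part of this patch): the Add/newReconciler/add
// wiring below is the long-hand form of controller-runtime's builder API, which this file
// also exposes through SetupWithManager. The sketch assumes the same controller-runtime
// version used by this patch and omits the extra ConfigMap/Secret watches set up in add();
// the package and function names here are hypothetical.
package example

import (
	appsv1 "k8s.io/api/apps/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1"
)

// buildController watches IBPOrderer as the primary resource, requeues the owner of any
// Deployment it controls, and filters all events through the supplied predicates.
func buildController(mgr ctrl.Manager, r reconcile.Reconciler, filter predicate.Predicate) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&current.IBPOrderer{}).
		Owns(&appsv1.Deployment{}).
		WithEventFilter(filter).
		Complete(r)
}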
+func Add(mgr manager.Manager, config *config.Config) error { + r, err := newReconciler(mgr, config) + if err != nil { + return err + } + return add(mgr, r) +} + +// newReconciler returns a new reconcile.Reconciler +func newReconciler(mgr manager.Manager, cfg *config.Config) (*ReconcileIBPOrderer, error) { + client := k8sclient.New(mgr.GetClient(), &global.ConfigSetter{Config: cfg.Operator.Globals}) + scheme := mgr.GetScheme() + + ibporderer := &ReconcileIBPOrderer{ + client: client, + scheme: scheme, + Config: cfg, + update: map[string][]Update{}, + mutex: &sync.Mutex{}, + RestartService: staggerrestarts.New(client, cfg.Operator.Restart.Timeout.Get()), + } + + switch cfg.Offering { + case offering.K8S: + ibporderer.Offering = k8sorderer.New(client, scheme, cfg) + case offering.OPENSHIFT: + ibporderer.Offering = openshiftorderer.New(client, scheme, cfg) + } + + return ibporderer, nil +} + +// add adds a new Controller to mgr with r as the reconcile.Reconciler +func add(mgr manager.Manager, r *ReconcileIBPOrderer) error { + // Create a new controller + predicateFuncs := predicate.Funcs{ + CreateFunc: r.CreateFunc, + UpdateFunc: r.UpdateFunc, + DeleteFunc: r.DeleteFunc, + } + + c, err := controller.New("ibporderer-controller", mgr, controller.Options{Reconciler: r}) + if err != nil { + return err + } + + // Watch for changes to primary resource IBPOrderer + err = c.Watch(&source.Kind{Type: ¤t.IBPOrderer{}}, &handler.EnqueueRequestForObject{}, predicateFuncs) + if err != nil { + return err + } + + // Watch for changes to config maps (Create and Update funcs handle only watching for restart config map) + err = c.Watch(&source.Kind{Type: &corev1.ConfigMap{}}, &handler.EnqueueRequestForObject{}, predicateFuncs) + if err != nil { + return err + } + + // TODO(user): Modify this to be the types you create that are owned by the primary resource + // Watch for changes to secondary resource Pods and requeue the owner IBPOrderer + err = c.Watch(&source.Kind{Type: &appsv1.Deployment{}}, &handler.EnqueueRequestForOwner{ + IsController: true, + OwnerType: ¤t.IBPOrderer{}, + }) + if err != nil { + return err + } + + // Watch for changes to tertiary resource Secrets and requeue the owner IBPOrderer + err = c.Watch(&source.Kind{Type: &corev1.Secret{}}, &handler.EnqueueRequestForOwner{ + IsController: true, + OwnerType: ¤t.IBPOrderer{}, + }, predicateFuncs) + if err != nil { + return err + } + + return nil +} + +var _ reconcile.Reconciler = &ReconcileIBPOrderer{} + +//go:generate counterfeiter -o mocks/ordererreconcile.go -fake-name OrdererReconcile . ordererReconcile + +type ordererReconcile interface { + Reconcile(*current.IBPOrderer, baseorderer.Update) (common.Result, error) +} + +// ReconcileIBPOrderer reconciles a IBPOrderer object +type ReconcileIBPOrderer struct { + // This client, initialized using mgr.Client() above, is a split client + // that reads objects from the cache and writes to the apiserver + client k8sclient.Client + scheme *runtime.Scheme + + Offering ordererReconcile + Config *config.Config + RestartService *staggerrestarts.StaggerRestartsService + + update map[string][]Update + mutex *sync.Mutex +} + +// Reconcile reads that state of the cluster for a IBPOrderer object and makes changes based on the state read +// and what is in the IBPOrderer.Spec +// Note: +// The Controller will requeue the Request to be processed again if the returned error is non-nil or +// Result.Requeue is true, otherwise upon completion it will remove the work from the queue. 
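// Editor's note (illustrative sketch, not part of this patch): the requeue contract the
// comment above describes, reduced to its smallest form. The type name is hypothetical.
package example

import (
	"context"

	ctrl "sigs.k8s.io/controller-runtime"
)

type noopReconciler struct{}

func (r *noopReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	// Returning a non-nil error or ctrl.Result{Requeue: true} puts the request back on
	// the work queue; an empty Result with a nil error removes it.
	return ctrl.Result{}, nil
}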
+func (r *ReconcileIBPOrderer) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { + var err error + + reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name) + + // If orderer-restart-config configmap is the object being reconciled, reconcile the + // restart configmap. + if request.Name == "orderer-restart-config" { + requeue, err := r.ReconcileRestart(request.Namespace) + // Error reconciling restart - requeue the request. + if err != nil { + return reconcile.Result{}, err + } + // Restart reconciled, requeue request if required. + return reconcile.Result{ + Requeue: requeue, + }, nil + } + + reqLogger.Info("Reconciling IBPOrderer") + + // Fetch the IBPOrderer instance + instance := ¤t.IBPOrderer{} + err = r.client.Get(context.TODO(), request.NamespacedName, instance) + if err != nil { + if k8serrors.IsNotFound(err) { + // Request object not found, could have been deleted after reconcile request. + // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. + // Return and don't requeue + return reconcile.Result{}, nil + } + // Error reading the object - requeue the request. + return reconcile.Result{}, operatorerrors.IsBreakingError(err, "failed to reconcile restart", log) + } + + var maxNameLength *int + if instance.Spec.ConfigOverride != nil { + override := &orderer.OrdererOverrides{} + err := json.Unmarshal(instance.Spec.ConfigOverride.Raw, override) + if err != nil { + return reconcile.Result{}, err + } + maxNameLength = override.MaxNameLength + } + + err = util.ValidationChecks(instance.TypeMeta, instance.ObjectMeta, "IBPOrderer", maxNameLength) + if err != nil { + return reconcile.Result{}, err + } + + if instance.Spec.NodeNumber == nil { + // If version is nil, then this is a v210 instance and the reconcile + // loop needs to be triggered to allow for instance migration + if instance.Status.Version != "" { + if instance.Status.Type == current.Deployed || instance.Status.Type == current.Warning { + // This is cluster's update, we don't want to reconcile. 
+ // It should only be a status update
+ log.Info(fmt.Sprintf("Update detected on %s cluster spec '%s', not supported", instance.Status.Type, instance.GetName()))
+ return reconcile.Result{}, nil
+ }
+ }
+ }
+
+ reqLogger.Info(fmt.Sprintf("Current update stack to process: %+v", GetUpdateStack(r.update)))
+
+ update := r.GetUpdateStatus(instance)
+ reqLogger.Info(fmt.Sprintf("Reconciling IBPOrderer '%s' with update values of [ %+v ]", instance.GetName(), update.GetUpdateStackWithTrues()))
+
+ result, err := r.Offering.Reconcile(instance, r.PopUpdate(instance.Name))
+ setStatusErr := r.SetStatus(instance, &result, err)
+ if setStatusErr != nil {
+ return reconcile.Result{}, operatorerrors.IsBreakingError(setStatusErr, "failed to update status", log)
+ }
+
+ if err != nil {
+ return reconcile.Result{}, operatorerrors.IsBreakingError(errors.Wrapf(err, "Orderer instance '%s' encountered error", instance.GetName()), "stopping reconcile loop", log)
+ }
+
+ if result.Requeue {
+ r.PushUpdate(instance.Name, *update)
+ }
+
+ reqLogger.Info(fmt.Sprintf("Finished reconciling IBPOrderer '%s' with update values of [ %+v ]", instance.GetName(), update.GetUpdateStackWithTrues()))
+
+ // If the stack still has items that require processing, keep reconciling
+ // until the stack has been cleared
+ _, found := r.update[instance.GetName()]
+ if found {
+ if len(r.update[instance.GetName()]) > 0 {
+ return reconcile.Result{
+ Requeue: true,
+ }, nil
+ }
+ }
+
+ return result.Result, nil
+}
+
+func (r *ReconcileIBPOrderer) SetStatus(instance *current.IBPOrderer, result *common.Result, reconcileErr error) error {
+ err := r.SaveSpecState(instance)
+ if err != nil {
+ return errors.Wrap(err, "failed to save spec state")
+ }
+
+ // Hierarchy of setting status on an orderer node instance:
+ // 1. If an error has occurred, update the status and return
+ // 2. If no error has occurred, get the list of pods and determine
+ // if the pods are all running or still waiting to start. If all pods
+ // are running, mark the status as Deployed, otherwise mark the status as
+ // Deploying, but don't update the status yet
+ // 3. Check to see if a custom status has been passed. If so,
+ // set that status but don't update. However, if the OverrideUpdateStatus
+ // flag is set to true, update the status and return
+ // 4. If OverrideUpdateStatus was not set in step 3, determine if the genesis
+ // secret exists for the instance. If the genesis secret does not exist, update
+ // the status to precreate and return
+
+ // Need to get to ensure we are working with the latest state of the instance
+ err = r.client.Get(context.TODO(), types.NamespacedName{Name: instance.GetName(), Namespace: instance.GetNamespace()}, instance)
+ if err != nil {
+ return err
+ }
+
+ status := instance.Status.CRStatus
+
+ if reconcileErr != nil {
+ status.Type = current.Error
+ status.Status = current.True
+ status.Reason = "errorOccurredDuringReconcile"
+ status.Message = reconcileErr.Error()
+ status.LastHeartbeatTime = time.Now().String()
+ status.ErrorCode = operatorerrors.GetErrorCode(reconcileErr)
+
+ instance.Status = current.IBPOrdererStatus{
+ CRStatus: status,
+ }
+
+ log.Info(fmt.Sprintf("Updating status of IBPOrderer custom resource (%s) to %s phase", instance.GetName(), instance.Status.Type))
+ err := r.client.PatchStatus(context.TODO(), instance, nil, k8sclient.PatchOption{
+ Resilient: &k8sclient.ResilientPatch{
+ Retry: 2,
+ Into: &current.IBPOrderer{},
+ Strategy: client.MergeFrom,
+ },
+ })
+ if err != nil {
+ return err
+ }
+
+ return nil
+ }
+
+ status.Versions.Reconciled = instance.Spec.FabricVersion
+
+ // If this is a parent (cluster spec), then ignore setting status. Status should
+ // be set by the child nodes only, and child nodes should update the status of the parent
+ // according to the statuses of the child nodes. This needs to stay after the check for
+ // reconcile error, otherwise the parent CR won't get updated with the error if there
+ // are validation errors on the spec.
+ if instance.Spec.NodeNumber == nil {
+ return nil
+ }
+
+ podStatus, err := r.GetPodStatus(instance)
+ if err != nil {
+ return err
+ }
+
+ numberOfPodsRunning := 0
+ for _, status := range podStatus {
+ if status.Phase == corev1.PodRunning {
+ numberOfPodsRunning++
+ }
+ }
+
+ // if numberOfPodsRunning == len(podStatus) && len(podStatus) > 0 {
+ if len(podStatus) > 0 {
+ if len(podStatus) == numberOfPodsRunning {
+ status.Type = current.Deployed
+ status.Status = current.True
+ status.Reason = "allPodsRunning"
+ status.Message = "allPodsRunning"
+ } else {
+ status.Type = current.Deploying
+ status.Status = current.True
+ status.Reason = "waitingForPods"
+ status.Message = "waitingForPods"
+ }
+ }
+
+ // Check if the reconcile loop returned an updated status that differs from the existing status.
+ // If so, set status to the reconcile status.
+ if result != nil {
+ reconcileStatus := result.Status
+ if reconcileStatus != nil {
+ if instance.Status.Type != reconcileStatus.Type || instance.Status.Reason != reconcileStatus.Reason || instance.Status.Message != reconcileStatus.Message {
+ status.Type = reconcileStatus.Type
+ status.Status = current.True
+ status.Reason = reconcileStatus.Reason
+ status.Message = reconcileStatus.Message
+ status.LastHeartbeatTime = time.Now().String()
+
+ if result.OverrideUpdateStatus {
+ instance.Status = current.IBPOrdererStatus{
+ CRStatus: status,
+ }
+
+ log.Info(fmt.Sprintf("Updating status returned by reconcile loop of IBPOrderer custom resource (%s) to %s phase", instance.GetName(), instance.Status.Type))
+ err := r.client.PatchStatus(context.TODO(), instance, nil, k8sclient.PatchOption{
+ Resilient: &k8sclient.ResilientPatch{
+ Retry: 2,
+ Into: &current.IBPOrderer{},
+ Strategy: client.MergeFrom,
+ },
+ })
+ if err != nil {
+ return err
+ }
+
+ return nil
+ }
+ } else {
+ // If the reconcile loop returned an updated status that is the same as the current instance status, then no status update is required.
+ // NOTE: This will only occur once the instance has hit Deployed state for the first time and would only switch between Deployed and + // Warning states. + log.Info(fmt.Sprintf("Reconcile loop returned a status that is the same as %s's current status (%s), not updating status", reconcileStatus.Type, instance.Name)) + return nil + } + } + + // There are cases we want to return before checking for genesis secrets, such as updating the spec with default values + // during prereconcile checks + if result.OverrideUpdateStatus { + return nil + } + } + + precreated := false + if instance.Spec.IsUsingChannelLess() { + log.Info(fmt.Sprintf("IBPOrderer custom resource (%s) is using channel less mode", instance.GetName())) + precreated = false + } else { + err = r.GetGenesisSecret(instance) + if err != nil { + log.Info(fmt.Sprintf("IBPOrderer custom resource (%s) pods are waiting for genesis block, setting status to precreate", instance.GetName())) + precreated = true + } + } + + if precreated { + status.Type = current.Precreated + status.Status = current.True + status.Reason = "waiting for genesis block" + status.Message = "waiting for genesis block" + } + + // Only update status if status is different from current status + if status.Type != "" && (instance.Status.Type != status.Type || instance.Status.Reason != status.Reason || instance.Status.Message != status.Message) { + status.LastHeartbeatTime = time.Now().String() + log.Info(fmt.Sprintf("Updating status of IBPOrderer custom resource (%s) from %s to %s phase", instance.GetName(), instance.Status.Type, status.Type)) + + instance.Status = current.IBPOrdererStatus{ + CRStatus: status, + } + + err = r.client.PatchStatus(context.TODO(), instance, nil, k8sclient.PatchOption{ + Resilient: &k8sclient.ResilientPatch{ + Retry: 2, + Into: ¤t.IBPOrderer{}, + Strategy: client.MergeFrom, + }, + }) + if err != nil { + return err + } + } + + return nil +} + +func (r *ReconcileIBPOrderer) SaveSpecState(instance *current.IBPOrderer) error { + data, err := yaml.Marshal(instance.Spec) + if err != nil { + return err + } + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-spec", instance.GetName()), + Namespace: instance.GetNamespace(), + Labels: instance.GetLabels(), + }, + BinaryData: map[string][]byte{ + "spec": data, + }, + } + + err = r.client.CreateOrUpdate(context.TODO(), cm, k8sclient.CreateOrUpdateOption{Owner: instance, Scheme: r.scheme}) + if err != nil { + return err + } + + return nil +} + +func (r *ReconcileIBPOrderer) GetSpecState(instance *current.IBPOrderer) (*corev1.ConfigMap, error) { + cm := &corev1.ConfigMap{} + nn := types.NamespacedName{ + Name: fmt.Sprintf("%s-spec", instance.GetName()), + Namespace: instance.GetNamespace(), + } + + err := r.client.Get(context.TODO(), nn, cm) + if err != nil { + return nil, err + } + + return cm, nil +} + +func (r *ReconcileIBPOrderer) GetPodStatus(instance *current.IBPOrderer) (map[string]corev1.PodStatus, error) { + statuses := map[string]corev1.PodStatus{} + + labelSelector, err := labels.Parse(fmt.Sprintf("app=%s", instance.GetName())) + if err != nil { + return statuses, errors.Wrap(err, "failed to parse label selector for app name") + } + + listOptions := &client.ListOptions{ + LabelSelector: labelSelector, + Namespace: instance.GetNamespace(), + } + + podList := &corev1.PodList{} + err = r.client.List(context.TODO(), podList, listOptions) + if err != nil { + return statuses, err + } + + for _, pod := range podList.Items { + statuses[pod.Name] = pod.Status + } + + 
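// Editor's note (illustrative sketch, not part of this patch): SetStatus above reduces the
// pod statuses returned by this function to a single Deployed/Deploying decision. A minimal
// sketch of that check; the package and function names are hypothetical.
package example

import corev1 "k8s.io/api/core/v1"

// allPodsRunning reports true only when at least one pod exists and every pod has
// reached the Running phase.
func allPodsRunning(statuses map[string]corev1.PodStatus) bool {
	if len(statuses) == 0 {
		return false
	}
	for _, s := range statuses {
		if s.Phase != corev1.PodRunning {
			return false
		}
	}
	return true
}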
return statuses, nil +} + +func (r *ReconcileIBPOrderer) GetGenesisSecret(instance *current.IBPOrderer) error { + nn := types.NamespacedName{ + Name: fmt.Sprintf("%s-genesis", instance.GetName()), + Namespace: instance.GetNamespace(), + } + err := r.client.Get(context.TODO(), nn, &corev1.Secret{}) + if err != nil { + return err + } + + return nil +} + +func (r *ReconcileIBPOrderer) CreateFunc(e event.CreateEvent) bool { + update := Update{} + + switch e.Object.(type) { + case *current.IBPOrderer: + orderer := e.Object.(*current.IBPOrderer) + log.Info(fmt.Sprintf("Create event detected for orderer '%s'", orderer.GetName())) + + if orderer.Status.HasType() { + log.Info(fmt.Sprintf("Operator restart detected, performing update checks on exisitng orderer '%s'", orderer.GetName())) + + cm, err := r.GetSpecState(orderer) + if err != nil { + log.Info(fmt.Sprintf("Failed getting saved orderer spec '%s', can't perform update checks, triggering reconcile: %s", orderer.GetName(), err.Error())) + return true + } + + specBytes := cm.BinaryData["spec"] + savedOrderer := ¤t.IBPOrderer{} + + err = yaml.Unmarshal(specBytes, &savedOrderer.Spec) + if err != nil { + log.Info(fmt.Sprintf("Unmarshal failed for saved orderer spec '%s', can't perform update checks, triggering reconcile: %s", orderer.GetName(), err.Error())) + return true + } + + if !reflect.DeepEqual(orderer.Spec, savedOrderer.Spec) { + log.Info(fmt.Sprintf("IBPOrderer '%s' spec was updated while operator was down", orderer.GetName())) + update.specUpdated = true + } + + if !reflect.DeepEqual(orderer.Spec.ConfigOverride, savedOrderer.Spec.ConfigOverride) { + log.Info(fmt.Sprintf("IBPOrderer '%s' overrides were updated while operator was down", orderer.GetName())) + update.overridesUpdated = true + } + + update.imagesUpdated = imagesUpdated(savedOrderer, orderer) + update.fabricVersionUpdated = fabricVersionUpdated(savedOrderer, orderer) + if fabricVersionUpdatedTo149plusOr221plus(savedOrderer, orderer) { + log.Info(fmt.Sprintf("Fabric version update detected from '%s' to '%s' setting tls cert created flag '%s'", savedOrderer.Spec.FabricVersion, orderer.Spec.FabricVersion, orderer.GetName())) + update.tlsCertCreated = true + } + + log.Info(fmt.Sprintf("Create event triggering reconcile for updating orderer '%s'", orderer.GetName())) + r.PushUpdate(orderer.GetName(), update) + } + + // If creating resource for the first time, check that a unique name is provided + err := commoncontroller.ValidateCRName(r.client, orderer.Name, orderer.Namespace, commoncontroller.IBPORDERER) + if err != nil { + log.Error(err, "failed to validate orderer name") + operror := operatorerrors.Wrap(err, operatorerrors.InvalidCustomResourceCreateRequest, "failed to validate custom resource name") + err = r.SetStatus(orderer, nil, operror) + if err != nil { + log.Error(err, "failed to set status to error", "orderer.name", orderer.Name, "error", "InvalidCustomResourceCreateRequest") + } + return false + } + + log.Info(fmt.Sprintf("Create event triggering reconcile for creating orderer '%s'", orderer.GetName())) + + case *corev1.Secret: + secret := e.Object.(*corev1.Secret) + + if secret.OwnerReferences == nil || len(secret.OwnerReferences) == 0 { + isOrdererSecret, err := r.AddOwnerReferenceToSecret(secret) + if err != nil || !isOrdererSecret { + return false + } + } + + if secret.OwnerReferences[0].Kind == KIND { + log.Info(fmt.Sprintf("Create event detected for secret '%s'", secret.GetName())) + instanceName := secret.OwnerReferences[0].Name + if 
util.IsSecretTLSCert(secret.Name) { + update.tlsCertCreated = true + } else if util.IsSecretEcert(secret.Name) { + update.ecertCreated = true + } else { + return false + } + + log.Info(fmt.Sprintf("Orderer crypto create triggering reconcile on IBPOrderer custom resource %s: update [ %+v ]", instanceName, update.GetUpdateStackWithTrues())) + r.PushUpdate(instanceName, update) + } + + case *appsv1.Deployment: + dep := e.Object.(*appsv1.Deployment) + log.Info(fmt.Sprintf("Create event detected by IBPOrderer controller for deployment '%s', triggering reconcile", dep.GetName())) + + case *corev1.ConfigMap: + cm := e.Object.(*corev1.ConfigMap) + if cm.Name == "orderer-restart-config" { + log.Info(fmt.Sprintf("Create event detected by IBPOrderer contoller for config map '%s', triggering restart reconcile", cm.GetName())) + } else { + return false + } + + } + + return true +} + +func (r *ReconcileIBPOrderer) UpdateFunc(e event.UpdateEvent) bool { + update := Update{} + + switch e.ObjectOld.(type) { + case *current.IBPOrderer: + oldOrderer := e.ObjectOld.(*current.IBPOrderer) + newOrderer := e.ObjectNew.(*current.IBPOrderer) + log.Info(fmt.Sprintf("Update event detected for orderer '%s'", oldOrderer.GetName())) + + if oldOrderer.Spec.NodeNumber == nil { + if oldOrderer.Status.Type != newOrderer.Status.Type { + log.Info(fmt.Sprintf("Parent orderer %s status updated from %s to %s", oldOrderer.Name, oldOrderer.Status.Type, newOrderer.Status.Type)) + } + + if oldOrderer.Status.Type == current.Deployed || oldOrderer.Status.Type == current.Error || oldOrderer.Status.Type == current.Warning { + // Parent orderer has been fully deployed by this point + log.Info(fmt.Sprintf("Ignoring the IBPOrderer cluster (parent) update after %s", oldOrderer.Status.Type)) + return false + } + + } + + if util.CheckIfZoneOrRegionUpdated(oldOrderer.Spec.Zone, newOrderer.Spec.Zone) { + log.Error(errors.New("Zone update is not allowed"), "invalid spec update") + return false + } + + if util.CheckIfZoneOrRegionUpdated(oldOrderer.Spec.Region, newOrderer.Spec.Region) { + log.Error(errors.New("Region update is not allowed"), "invalid spec update") + return false + } + + // Need to trigger update when status has changed is to allow us update the + // status of the parent, since the status of the parent depends on the status + // of its children .Only flag status update when there is a meaninful change + // and not everytime the heartbeat is updated + if oldOrderer.Status != newOrderer.Status { + if oldOrderer.Status.Type != newOrderer.Status.Type || + oldOrderer.Status.Reason != newOrderer.Status.Reason || + oldOrderer.Status.Message != newOrderer.Status.Message { + + log.Info(fmt.Sprintf("%s status changed to '%+v' from '%+v'", oldOrderer.GetName(), newOrderer.Status, oldOrderer.Status)) + update.statusUpdated = true + } + } + + if !reflect.DeepEqual(oldOrderer.Spec.ConfigOverride, newOrderer.Spec.ConfigOverride) { + log.Info(fmt.Sprintf("%s config override updated", oldOrderer.GetName())) + update.overridesUpdated = true + } + + if !reflect.DeepEqual(oldOrderer.Spec, newOrderer.Spec) { + log.Info(fmt.Sprintf("%s spec updated", oldOrderer.GetName())) + update.specUpdated = true + } + + // Check for changes to orderer tag to determine if any migration logic needs to be executed + // from old orderer version to new orderer version + if oldOrderer.Spec.Images != nil && newOrderer.Spec.Images != nil { + if oldOrderer.Spec.Images.OrdererTag != newOrderer.Spec.Images.OrdererTag { + log.Info(fmt.Sprintf("%s orderer tag updated from %s to 
%s", oldOrderer.GetName(), oldOrderer.Spec.Images.OrdererTag, newOrderer.Spec.Images.OrdererTag)) + update.ordererTagUpdated = true + } + } + + if fabricVersionUpdatedTo149plusOr221plus(oldOrderer, newOrderer) { + log.Info(fmt.Sprintf("Fabric version update detected from '%s' to '%s' setting tls cert created flag '%s'", oldOrderer.Spec.FabricVersion, newOrderer.Spec.FabricVersion, newOrderer.GetName())) + update.tlsCertCreated = true + } + + update.mspUpdated = commoncontroller.MSPInfoUpdateDetected(oldOrderer.Spec.Secret, newOrderer.Spec.Secret) + + if newOrderer.Spec.Action.Restart { + update.restartNeeded = true + } + + if oldOrderer.Spec.Action.Reenroll.Ecert != newOrderer.Spec.Action.Reenroll.Ecert { + update.ecertReenrollNeeded = newOrderer.Spec.Action.Reenroll.Ecert + } + + if oldOrderer.Spec.Action.Reenroll.TLSCert != newOrderer.Spec.Action.Reenroll.TLSCert { + update.tlscertReenrollNeeded = newOrderer.Spec.Action.Reenroll.TLSCert + } + + if oldOrderer.Spec.Action.Reenroll.EcertNewKey != newOrderer.Spec.Action.Reenroll.EcertNewKey { + update.ecertNewKeyReenroll = newOrderer.Spec.Action.Reenroll.EcertNewKey + } + + if oldOrderer.Spec.Action.Reenroll.TLSCertNewKey != newOrderer.Spec.Action.Reenroll.TLSCertNewKey { + update.tlscertNewKeyReenroll = newOrderer.Spec.Action.Reenroll.TLSCertNewKey + } + + if newOrderer.Spec.Action.Enroll.Ecert { + update.ecertEnroll = true + } + + if newOrderer.Spec.Action.Enroll.TLSCert { + update.tlscertEnroll = true + } + + update.deploymentUpdated = deploymentUpdated(oldOrderer, newOrderer) + oldVer := version.String(oldOrderer.Spec.FabricVersion) + newVer := version.String(newOrderer.Spec.FabricVersion) + + // check if this V1 -> V2.2.x/V2.4.x orderer migration + if (oldOrderer.Spec.FabricVersion == "" || + version.GetMajorReleaseVersion(oldOrderer.Spec.FabricVersion) == version.V1) && + version.GetMajorReleaseVersion(newOrderer.Spec.FabricVersion) == version.V2 { + update.migrateToV2 = true + if newVer.EqualWithoutTag(version.V2_4_1) || newVer.GreaterThan(version.V2_4_1) { + update.migrateToV24 = true + // Re-enrolling tls cert to include admin hostname in SAN (for orderers >=2.4.1) + update.tlscertReenrollNeeded = true + } + } + + // check if this V2.2.x -> V2.4.x orderer migration + if (version.GetMajorReleaseVersion(oldOrderer.Spec.FabricVersion) == version.V2) && + oldVer.LessThan(version.V2_4_1) && + (newVer.EqualWithoutTag(version.V2_4_1) || newVer.GreaterThan(version.V2_4_1)) { + update.migrateToV24 = true + // Re-enrolling tls cert to include admin hostname in SAN (for orderers >=2.4.1) + update.tlscertReenrollNeeded = true + } + + if oldOrderer.Spec.NodeOUDisabled() != newOrderer.Spec.NodeOUDisabled() { + update.nodeOUUpdated = true + } + + // if use updates NumSecondsWarningPeriod field once we have already run the reconcile + // we need to retrigger the timer logic + if oldOrderer.Spec.NumSecondsWarningPeriod != newOrderer.Spec.NumSecondsWarningPeriod { + update.ecertUpdated = true + update.tlsCertUpdated = true + log.Info(fmt.Sprintf("%s NumSecondsWarningPeriod updated", oldOrderer.GetName())) + } + + if update.Detected() { + log.Info(fmt.Sprintf("Spec update triggering reconcile on IBPOrderer custom resource %s: update [ %+v ]", oldOrderer.GetName(), update.GetUpdateStackWithTrues())) + r.PushUpdate(oldOrderer.GetName(), update) + return true + } + + case *appsv1.Deployment: + oldDeployment := e.ObjectOld.(*appsv1.Deployment) + log.Info(fmt.Sprintf("Spec update detected by IBPOrderer controller on deployment '%s'", 
oldDeployment.GetName())) + + case *corev1.Secret: + oldSecret := e.ObjectOld.(*corev1.Secret) + newSecret := e.ObjectNew.(*corev1.Secret) + + if oldSecret.OwnerReferences == nil || len(oldSecret.OwnerReferences) == 0 { + isOrdererSecret, err := r.AddOwnerReferenceToSecret(oldSecret) + if err != nil || !isOrdererSecret { + return false + } + } + + if oldSecret.OwnerReferences[0].Kind == KIND { + if reflect.DeepEqual(oldSecret.Data, newSecret.Data) { + return false + } + + log.Info(fmt.Sprintf("Update event detected on secret '%s'", oldSecret.GetName())) + instanceName := oldSecret.OwnerReferences[0].Name + if util.IsSecretTLSCert(oldSecret.Name) { + update.tlsCertUpdated = true + log.Info(fmt.Sprintf("TLS cert updated for %s", instanceName)) + } + if util.IsSecretEcert(oldSecret.Name) { + update.ecertUpdated = true + log.Info(fmt.Sprintf("ecert updated for %s", instanceName)) + } + + if update.CertificateUpdated() { + log.Info(fmt.Sprintf("Orderer crypto update triggering reconcile on IBPOrderer custom resource %s: update [ %+v ]", instanceName, update.GetUpdateStackWithTrues())) + r.PushUpdate(instanceName, update) + return true + } + } + + case *corev1.ConfigMap: + cm := e.ObjectOld.(*corev1.ConfigMap) + if cm.Name == "orderer-restart-config" { + log.Info("Update event detected for orderer-restart-config, triggering restart reconcile") + return true + } + } + + return false +} + +func (r *ReconcileIBPOrderer) DeleteFunc(e event.DeleteEvent) bool { + switch e.Object.(type) { + case *current.IBPOrderer: + oldOrderer := e.Object.(*current.IBPOrderer) + + if oldOrderer.Spec.NodeNumber != nil { + log.Info(fmt.Sprintf("Orderer node %d (%s) deleted", *oldOrderer.Spec.NodeNumber, oldOrderer.GetName())) + + // Deleting this config map manually, in 2.5.1 release of operator this config map was created + // without proper controller references set and was not cleaned up on orderer resource deletion. 
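// Editor's note (illustrative sketch, not part of this patch): the manual cleanup below is
// only needed because the 2.5.1-era config map was created without a controller reference.
// Setting one when the object is created lets Kubernetes garbage-collect it with the
// IBPOrderer; a minimal sketch using controller-runtime's helper (names are hypothetical).
package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

	current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1"
)

func ownConfigMap(orderer *current.IBPOrderer, cm *corev1.ConfigMap, scheme *runtime.Scheme) error {
	// With this owner reference in place, deleting the IBPOrderer lets the garbage
	// collector remove the config map as well, so no manual delete is required.
	return controllerutil.SetControllerReference(orderer, cm, scheme)
}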
+ log.Info(fmt.Sprintf("Deleting %s-init-config config map, if found", oldOrderer.GetName())) + if err := r.client.Delete(context.TODO(), &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-init-config", oldOrderer.GetName()), + Namespace: oldOrderer.GetNamespace(), + }, + }); client.IgnoreNotFound(err) != nil { + log.Info(fmt.Sprintf("failed to delete config map: %s", err)) + } + + parentName := oldOrderer.ObjectMeta.Labels["parent"] + labelSelector, err := labels.Parse(fmt.Sprintf("parent=%s", parentName)) + if err != nil { + log.Info(fmt.Sprintf("failed to parse selector for parent name: %s", err.Error())) + return false + } + + listOptions := &client.ListOptions{ + LabelSelector: labelSelector, + Namespace: oldOrderer.GetNamespace(), + } + + ordererList := ¤t.IBPOrdererList{} + err = r.client.List(context.TODO(), ordererList, listOptions) + if err != nil { + log.Info(fmt.Sprintf("Ignoring Deletion of Orderer node %d (%s) due to error in getting list of other nodes: %s", *oldOrderer.Spec.NodeNumber, oldOrderer.GetName(), err.Error())) + return false + } + + log.Info(fmt.Sprintf("There are %d child nodes for the orderer parent %s.", len(ordererList.Items), parentName)) + + if len(ordererList.Items) == 0 { + log.Info(fmt.Sprintf("Deleting Parent (%s) of Orderer node %d (%s) as all nodes are deleted.", parentName, *oldOrderer.Spec.NodeNumber, oldOrderer.GetName())) + parent := ¤t.IBPOrderer{} + parent.SetName(parentName) + parent.SetNamespace(oldOrderer.GetNamespace()) + + err := r.client.Delete(context.TODO(), parent) + if err != nil { + log.Error(err, fmt.Sprintf("Error deleting parent (%s) of Orderer node %d (%s).", parentName, *oldOrderer.Spec.NodeNumber, oldOrderer.GetName())) + } + return false + } + + log.Info(fmt.Sprintf("Ignoring Deletion of Orderer node %d (%s) as there are %d nodes of parent still around", *oldOrderer.Spec.NodeNumber, oldOrderer.GetName(), len(ordererList.Items))) + return false + } + + log.Info(fmt.Sprintf("Orderer parent %s deleted", oldOrderer.GetName())) + parentName := oldOrderer.GetName() + labelSelector, err := labels.Parse(fmt.Sprintf("parent=%s", parentName)) + if err != nil { + log.Info(fmt.Sprintf("failed to parse selector for parent name: %s", err.Error())) + } + + listOptions := &client.ListOptions{ + LabelSelector: labelSelector, + Namespace: oldOrderer.GetNamespace(), + } + + ordererList := ¤t.IBPOrdererList{} + err = r.client.List(context.TODO(), ordererList, listOptions) + if err != nil { + log.Info(fmt.Sprintf("Ignoring Deletion of Orderer parent %s due to error in getting list of child nodes: %s", oldOrderer.GetName(), err.Error())) + return false + } + + log.Info(fmt.Sprintf("There are %d child nodes for the orderer parent %s.", len(ordererList.Items), parentName)) + + for _, item := range ordererList.Items { + log.Info(fmt.Sprintf("Deleting child node %s", item.GetName())) + + child := ¤t.IBPOrderer{} + child.SetName(item.GetName()) + child.SetNamespace(item.GetNamespace()) + + err := r.client.Delete(context.TODO(), child) + if err != nil { + log.Error(err, fmt.Sprintf("Error child node (%s) of Orderer (%s).", child.GetName(), parentName)) + } + } + + return false + + case *appsv1.Deployment: + dep := e.Object.(*appsv1.Deployment) + log.Info(fmt.Sprintf("Delete detected by IBPOrderer controller on deployment '%s'", dep.GetName())) + case *corev1.Secret: + secret := e.Object.(*corev1.Secret) + log.Info(fmt.Sprintf("Delete detected by IBPOrderer controller on secret '%s'", secret.GetName())) + case *corev1.ConfigMap: + 
cm := e.Object.(*corev1.ConfigMap)
+ log.Info(fmt.Sprintf("Delete detected by IBPOrderer controller on configmap '%s'", cm.GetName()))
+ }
+
+ return true
+}
+
+func (r *ReconcileIBPOrderer) GetUpdateStatusAtElement(instance *current.IBPOrderer, index int) *Update {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+
+ update := Update{}
+ _, ok := r.update[instance.GetName()]
+ if !ok {
+ return &update
+ }
+
+ if len(r.update[instance.GetName()]) >= 1 {
+ update = r.update[instance.GetName()][index]
+ }
+
+ return &update
+}
+
+func (r *ReconcileIBPOrderer) GetUpdateStatus(instance *current.IBPOrderer) *Update {
+ return r.GetUpdateStatusAtElement(instance, 0)
+}
+
+func (r *ReconcileIBPOrderer) PushUpdate(instanceName string, update Update) {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+
+ r.update[instanceName] = r.AppendUpdateIfMissing(r.update[instanceName], update)
+}
+
+func (r *ReconcileIBPOrderer) PopUpdate(instanceName string) *Update {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+
+ update := Update{}
+ if len(r.update[instanceName]) >= 1 {
+ update = r.update[instanceName][0]
+ if len(r.update[instanceName]) == 1 {
+ r.update[instanceName] = []Update{}
+ } else {
+ r.update[instanceName] = r.update[instanceName][1:]
+ }
+ }
+
+ return &update
+}
+
+func (r *ReconcileIBPOrderer) AppendUpdateIfMissing(updates []Update, update Update) []Update {
+ for _, u := range updates {
+ if u == update {
+ return updates
+ }
+ }
+ return append(updates, update)
+}
+
+func deploymentUpdated(oldOrderer, newOrderer *current.IBPOrderer) bool {
+ if !reflect.DeepEqual(oldOrderer.Spec.Images, newOrderer.Spec.Images) {
+ log.Info(fmt.Sprintf("Images updated for '%s', deployment will be updated", newOrderer.Name))
+ return true
+ }
+
+ if !reflect.DeepEqual(oldOrderer.Spec.Replicas, newOrderer.Spec.Replicas) {
+ log.Info(fmt.Sprintf("Replica size updated for '%s', deployment will be updated", newOrderer.Name))
+ return true
+ }
+
+ if !reflect.DeepEqual(oldOrderer.Spec.Resources, newOrderer.Spec.Resources) {
+ log.Info(fmt.Sprintf("Resources updated for '%s', deployment will be updated", newOrderer.Name))
+ return true
+ }
+
+ if !reflect.DeepEqual(oldOrderer.Spec.Storage, newOrderer.Spec.Storage) {
+ log.Info(fmt.Sprintf("Storage updated for '%s', deployment will be updated", newOrderer.Name))
+ return true
+ }
+
+ if len(oldOrderer.Spec.ImagePullSecrets) != len(newOrderer.Spec.ImagePullSecrets) {
+ log.Info(fmt.Sprintf("ImagePullSecret updated for '%s', deployment will be updated", newOrderer.Name))
+ return true
+ }
+ for i, v := range newOrderer.Spec.ImagePullSecrets {
+ if v != oldOrderer.Spec.ImagePullSecrets[i] {
+ log.Info(fmt.Sprintf("ImagePullSecret updated for '%s', deployment will be updated", newOrderer.Name))
+ return true
+ }
+ }
+
+ return false
+}
+
+func (r *ReconcileIBPOrderer) AddOwnerReferenceToSecret(secret *corev1.Secret) (bool, error) {
+ // Orderer secrets we are looking to add owner references to are named:
+ // <prefix>-<instance name>-<type>
+ // <instance name>-init-rootcert
+
+ // The following secrets are created by operator, and will have owner references:
+ // <instance name>-genesis
+ // <instance name>-crypto-backup
+ // <instance name>-secret
+
+ items := strings.Split(secret.Name, "-")
+ if len(items) < 3 {
+ // Secret names we are looking for will be split into at least 3 strings:
+ // [prefix, instance name, type] OR [instance name, "init", "rootcert"]
+ return false, nil
+ }
+
+ // Account for the case where the instance's name is hyphenated
+ var instanceName string
+ if strings.Contains(secret.Name, "-init-rootcert") {
+ instanceName =
strings.Join(items[:len(items)-2], "-") // instance name contains all but last 2 items + } else { + instanceName = strings.Join(items[1:len(items)-1], "-") // instance name contains all but first and last item + } + + listOptions := &client.ListOptions{ + Namespace: secret.Namespace, + } + + ordererList := ¤t.IBPOrdererList{} + err := r.client.List(context.TODO(), ordererList, listOptions) + if err != nil { + return false, errors.Wrap(err, "failed to get list of orderers") + } + + for _, o := range ordererList.Items { + orderer := o + if orderer.Name == instanceName { + // Instance 'i' found in list of orderers + err := r.client.Update(context.TODO(), secret, k8sclient.UpdateOption{ + Owner: &orderer, + Scheme: r.scheme, + }) + if err != nil { + return false, err + } + return true, nil + } + } + + return false, nil +} + +func (r *ReconcileIBPOrderer) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(¤t.IBPOrderer{}). + Complete(r) +} + +func GetUpdateStack(allUpdates map[string][]Update) string { + stack := "" + + for orderer, updates := range allUpdates { + currentStack := "" + for index, update := range updates { + currentStack += fmt.Sprintf("{ %s}", update.GetUpdateStackWithTrues()) + if index != len(updates)-1 { + currentStack += " , " + } + } + stack += fmt.Sprintf("%s: [ %s ] ", orderer, currentStack) + } + + return stack +} + +func (r *ReconcileIBPOrderer) ReconcileRestart(namespace string) (bool, error) { + requeue, err := r.RestartService.Reconcile("orderer", namespace) + if err != nil { + log.Error(err, "failed to reconcile restart queues in orderer-restart-config") + return false, err + } + + return requeue, nil +} diff --git a/controllers/ibporderer/ibporderer_controller_test.go b/controllers/ibporderer/ibporderer_controller_test.go new file mode 100644 index 00000000..7480997b --- /dev/null +++ b/controllers/ibporderer/ibporderer_controller_test.go @@ -0,0 +1,794 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package ibporderer + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "sync" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + orderermocks "github.com/IBM-Blockchain/fabric-operator/controllers/ibporderer/mocks" + "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/orderer/v1" + config "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer/config/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/operatorerrors" + corev1 "k8s.io/api/core/v1" + k8serror "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var _ = Describe("ReconcileIBPOrderer", func() { + const ( + testRoleBindingFile = "../../../definitions/orderer/rolebinding.yaml" + testServiceAccountFile = "../../../definitions/orderer/serviceaccount.yaml" + ) + + var ( + reconciler *ReconcileIBPOrderer + request reconcile.Request + mockKubeClient *mocks.Client + mockOrdererReconcile *orderermocks.OrdererReconcile + instance *current.IBPOrderer + ) + + BeforeEach(func() { + mockKubeClient = &mocks.Client{} + mockOrdererReconcile = &orderermocks.OrdererReconcile{} + nodeNumber := 1 + instance = ¤t.IBPOrderer{ + Spec: current.IBPOrdererSpec{ + ClusterSize: 3, + NodeNumber: &nodeNumber, + }, + } + instance.Name = "test-orderer" + + mockKubeClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj client.Object) error { + switch obj.(type) { + case *current.IBPOrderer: + o := obj.(*current.IBPOrderer) + o.Kind = "IBPOrderer" + o.Name = instance.Name + + instance.Status = o.Status + } + return nil + } + + mockKubeClient.UpdateStatusStub = func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + switch obj.(type) { + case *current.IBPOrderer: + o := obj.(*current.IBPOrderer) + instance.Status = o.Status + } + return nil + } + + mockKubeClient.ListStub = func(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) error { + switch obj.(type) { + case *corev1.NodeList: + nodeList := obj.(*corev1.NodeList) + node := corev1.Node{} + node.Labels = map[string]string{} + node.Labels["topology.kubernetes.io/zone"] = "dal" + node.Labels["topology.kubernetes.io/region"] = "us-south" + nodeList.Items = append(nodeList.Items, node) + case *current.IBPOrdererList: + ordererList := obj.(*current.IBPOrdererList) + o1 := current.IBPOrderer{} + o1.Name = "test-orderer1" + o2 := current.IBPOrderer{} + o2.Name = "test-orderer2" + o3 := current.IBPOrderer{} + o3.Name = "test-orderer2" + ordererList.Items = []current.IBPOrderer{o1, o2, o3} + } + return nil + } + + reconciler = &ReconcileIBPOrderer{ + Offering: mockOrdererReconcile, + client: mockKubeClient, + scheme: &runtime.Scheme{}, + update: map[string][]Update{}, + mutex: &sync.Mutex{}, + } + request = reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: "test-namespace", + Name: "test", + }, + } + }) + + Context("Reconciles", func() { + It("does not return an error if the custom resource is 'not found'", func() { + notFoundErr := &k8serror.StatusError{ + ErrStatus: metav1.Status{ + Reason: metav1.StatusReasonNotFound, + }, + } + mockKubeClient.GetReturns(notFoundErr) + _, err := reconciler.Reconcile(context.TODO(), request) + Expect(err).NotTo(HaveOccurred()) + }) + + 
It("returns an error if the request to get custom resource return any other error besides 'not found'", func() { + alreadyExistsErr := &k8serror.StatusError{ + ErrStatus: metav1.Status{ + Message: "already exists", + Reason: metav1.StatusReasonAlreadyExists, + }, + } + mockKubeClient.GetReturns(alreadyExistsErr) + _, err := reconciler.Reconcile(context.TODO(), request) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("already exists")) + }) + + It("returns an error if it encountered a non-breaking error", func() { + errMsg := "failed to reconcile deployment encountered breaking error" + mockOrdererReconcile.ReconcileReturns(common.Result{}, errors.New(errMsg)) + _, err := reconciler.Reconcile(context.TODO(), request) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(fmt.Sprintf("Orderer instance '%s' encountered error: %s", instance.Name, errMsg))) + }) + + It("does not return an error if it encountered a breaking error", func() { + mockOrdererReconcile.ReconcileReturns(common.Result{}, operatorerrors.New(operatorerrors.InvalidDeploymentCreateRequest, "failed to reconcile deployment encountered breaking error")) + _, err := reconciler.Reconcile(context.TODO(), request) + Expect(err).NotTo(HaveOccurred()) + }) + + Context("set status", func() { + It("sets the status to error if error occured during IBPOrderer reconciliation", func() { + reconciler.SetStatus(instance, nil, errors.New("ibporderer error")) + Expect(instance.Status.Type).To(Equal(current.Error)) + Expect(instance.Status.Message).To(Equal("ibporderer error")) + }) + + It("sets the status to deploying if pod is not yet running", func() { + mockKubeClient.ListStub = func(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) error { + switch obj.(type) { + case *corev1.PodList: + podList := obj.(*corev1.PodList) + pod := corev1.Pod{} + podList.Items = append(podList.Items, pod) + return nil + case *current.IBPOrdererList: + ordererList := obj.(*current.IBPOrdererList) + orderer := current.IBPOrderer{} + orderer.Status = current.IBPOrdererStatus{ + CRStatus: current.CRStatus{ + Type: current.Deploying, + }, + } + ordererList.Items = append(ordererList.Items, orderer) + return nil + } + return nil + } + + reconciler.SetStatus(instance, nil, nil) + Expect(instance.Status.Type).To(Equal(current.Deploying)) + }) + + It("sets the status to deployed if pod is running", func() { + mockKubeClient.ListStub = func(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) error { + switch obj.(type) { + case *corev1.PodList: + podList := obj.(*corev1.PodList) + pod := corev1.Pod{ + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + }, + } + podList.Items = append(podList.Items, pod) + + return nil + case *current.IBPOrdererList: + ordererList := obj.(*current.IBPOrdererList) + orderer := current.IBPOrderer{} + orderer.Status = current.IBPOrdererStatus{ + CRStatus: current.CRStatus{ + Type: current.Deployed, + }, + } + ordererList.Items = append(ordererList.Items, orderer) + return nil + } + return nil + } + + instance.Spec.ClusterSize = 1 + reconciler.SetStatus(instance, nil, nil) + Expect(instance.Status.Type).To(Equal(current.Deployed)) + }) + + It("sets the status to warning if the reconcile loop returns a warning status", func() { + mockKubeClient.ListStub = func(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) error { + switch obj.(type) { + case *corev1.PodList: + podList := obj.(*corev1.PodList) + pod := corev1.Pod{ + Status: corev1.PodStatus{ + Phase: 
corev1.PodRunning, + }, + } + podList.Items = append(podList.Items, pod) + + return nil + case *current.IBPOrdererList: + ordererList := obj.(*current.IBPOrdererList) + orderer := current.IBPOrderer{} + orderer.Status = current.IBPOrdererStatus{ + CRStatus: current.CRStatus{ + Type: current.Deployed, + }, + } + ordererList.Items = append(ordererList.Items, orderer) + return nil + } + return nil + } + + result := &common.Result{ + Status: ¤t.CRStatus{ + Type: current.Warning, + }, + } + + instance.Spec.ClusterSize = 1 + reconciler.SetStatus(instance, result, nil) + Expect(instance.Status.Type).To(Equal(current.Warning)) + }) + + It("persists warning status if the instance is already in warning state and reconcile loop returns a warning status", func() { + mockKubeClient.ListStub = func(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) error { + switch obj.(type) { + case *corev1.PodList: + podList := obj.(*corev1.PodList) + pod := corev1.Pod{ + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + }, + } + podList.Items = append(podList.Items, pod) + + return nil + case *current.IBPOrdererList: + ordererList := obj.(*current.IBPOrdererList) + orderer := current.IBPOrderer{} + orderer.Status = current.IBPOrdererStatus{ + CRStatus: current.CRStatus{ + Type: current.Deployed, + }, + } + ordererList.Items = append(ordererList.Items, orderer) + return nil + } + return nil + } + + result := &common.Result{ + Status: ¤t.CRStatus{ + Type: current.Warning, + }, + } + + instance.Spec.ClusterSize = 1 + instance.Status.Type = current.Warning + reconciler.SetStatus(instance, result, nil) + Expect(instance.Status.Type).To(Equal(current.Warning)) + }) + }) + }) + + Context("update reconcile", func() { + var ( + oldOrderer *current.IBPOrderer + newOrderer *current.IBPOrderer + oldSecret *corev1.Secret + newSecret *corev1.Secret + e event.UpdateEvent + ) + + BeforeEach(func() { + configOverride := &config.Orderer{ + Orderer: v1.Orderer{ + General: v1.General{ + LedgerType: "type1", + }, + }, + } + configBytes, err := json.Marshal(configOverride) + Expect(err).NotTo(HaveOccurred()) + + oldOrderer = ¤t.IBPOrderer{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance.Name, + }, + Spec: current.IBPOrdererSpec{ + Images: ¤t.OrdererImages{ + OrdererTag: "1.4.6-20200101", + }, + ConfigOverride: &runtime.RawExtension{Raw: configBytes}, + }, + } + + configOverride2 := &config.Orderer{ + Orderer: v1.Orderer{ + General: v1.General{ + LedgerType: "type2", + }, + }, + } + configBytes2, err := json.Marshal(configOverride2) + Expect(err).NotTo(HaveOccurred()) + newOrderer = ¤t.IBPOrderer{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance.Name, + }, + Spec: current.IBPOrdererSpec{ + Images: ¤t.OrdererImages{ + OrdererTag: "1.4.9-2511004", + }, + ConfigOverride: &runtime.RawExtension{Raw: configBytes2}, + }, + } + + e = event.UpdateEvent{ + ObjectOld: oldOrderer, + ObjectNew: newOrderer, + } + + Expect(reconciler.UpdateFunc(e)).To(Equal(true)) + + oldOrderer = ¤t.IBPOrderer{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance.Name, + }, + Spec: current.IBPOrdererSpec{ + Images: ¤t.OrdererImages{ + OrdererTag: "1.4.9-2511004", + }, + MSPID: "old-mspid", + }, + } + + newOrderer = ¤t.IBPOrderer{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance.Name, + }, + Spec: current.IBPOrdererSpec{ + Images: ¤t.OrdererImages{ + OrdererTag: "1.4.9-2511004", + }, + MSPID: "new-mspid", + }, + } + + e = event.UpdateEvent{ + ObjectOld: oldOrderer, + ObjectNew: newOrderer, + } + + 
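+			// NOTE (illustrative aside, not part of the original change): the
+			// UpdateFunc(e) calls below push Update entries onto the reconciler's
+			// per-instance stack (calls expected to return false push nothing), so by
+			// the end of this BeforeEach the stack for "test-orderer" looks roughly
+			// like (flags abbreviated, inferred from the assertions further down):
+			//
+			//	r.update["test-orderer"] = []Update{
+			//		{overridesUpdated: true, ordererTagUpdated: true}, // images + config override change
+			//		{specUpdated: true},                               // MSPID change
+			//		{ecertUpdated: true},                              // ecert signcert data change
+			//		{mspUpdated: true},                                // MSP spec change
+			//	}
+			//
+			// The "properly pops update flags from stack" test then drains this stack
+			// one Reconcile call at a time.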
Expect(reconciler.UpdateFunc(e)).To(Equal(true)) + + oldSecret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("tls-%s-signcert", instance.Name), + OwnerReferences: []metav1.OwnerReference{ + { + Name: instance.Name, + Kind: "IBPOrderer", + }, + }, + }, + } + + newSecret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("tls-%s-signcert", instance.Name), + OwnerReferences: []metav1.OwnerReference{ + { + Name: instance.Name, + Kind: "IBPOrderer", + }, + }, + }, + } + e = event.UpdateEvent{ + ObjectOld: oldSecret, + ObjectNew: newSecret, + } + + Expect(reconciler.UpdateFunc(e)).To(Equal(false)) + + oldSecret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("ecert-%s-signcert", instance.Name), + OwnerReferences: []metav1.OwnerReference{ + { + Name: instance.Name, + Kind: "IBPOrderer", + }, + }, + }, + Data: map[string][]byte{ + "test": []byte("data"), + }, + } + newSecret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("ecert-%s-signcert", instance.Name), + OwnerReferences: []metav1.OwnerReference{ + {Name: instance.Name}, + }, + }, + Data: map[string][]byte{ + "test": []byte("newdata"), + }, + } + e = event.UpdateEvent{ + ObjectOld: oldSecret, + ObjectNew: newSecret, + } + + Expect(reconciler.UpdateFunc(e)).To(Equal(true)) + + oldSecret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("tls-%s-admincert", instance.Name), + OwnerReferences: []metav1.OwnerReference{ + { + Name: instance.Name, + Kind: "IBPOrderer", + }, + }, + }, + } + newSecret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("tls-%s-admincert", instance.Name), + OwnerReferences: []metav1.OwnerReference{ + { + Name: instance.Name, + Kind: "IBPOrderer", + }, + }, + }, + } + e = event.UpdateEvent{ + ObjectOld: oldSecret, + ObjectNew: newSecret, + } + + Expect(reconciler.UpdateFunc(e)).To(Equal(false)) + + oldOrderer = ¤t.IBPOrderer{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance.Name, + }, + Spec: current.IBPOrdererSpec{ + Images: ¤t.OrdererImages{}, + Secret: ¤t.SecretSpec{ + MSP: ¤t.MSPSpec{ + Component: ¤t.MSP{ + SignCerts: "testcert", + }, + }, + }, + }, + } + + newOrderer = ¤t.IBPOrderer{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance.Name, + }, + Spec: current.IBPOrdererSpec{ + Images: ¤t.OrdererImages{}, + Secret: ¤t.SecretSpec{ + MSP: ¤t.MSPSpec{ + TLS: ¤t.MSP{ + SignCerts: "testcert", + }, + }, + }, + }, + } + + e = event.UpdateEvent{ + ObjectOld: oldOrderer, + ObjectNew: newOrderer, + } + + Expect(reconciler.UpdateFunc(e)).To(Equal(true)) + }) + + It("properly pops update flags from stack", func() { + By("popping first update - config overrides", func() { + Expect(reconciler.GetUpdateStatus(instance).ConfigOverridesUpdated()).To(Equal(true)) + Expect(reconciler.GetUpdateStatus(instance).OrdererTagUpdated()).To(Equal(true)) + + _, err := reconciler.Reconcile(context.TODO(), request) + Expect(err).NotTo(HaveOccurred()) + }) + + By("popping second update - spec updated", func() { + Expect(reconciler.GetUpdateStatus(instance).ConfigOverridesUpdated()).To(Equal(false)) + Expect(reconciler.GetUpdateStatus(instance).SpecUpdated()).To(Equal(true)) + + _, err := reconciler.Reconcile(context.TODO(), request) + Expect(err).NotTo(HaveOccurred()) + }) + + By("popping third update - ecert updated", func() { + Expect(reconciler.GetUpdateStatus(instance).TLSCertUpdated()).To(Equal(false)) + Expect(reconciler.GetUpdateStatus(instance).EcertUpdated()).To(Equal(true)) + + _, err := 
reconciler.Reconcile(context.TODO(), request) + Expect(err).NotTo(HaveOccurred()) + }) + + By("popping fourth update - msp spec updated", func() { + Expect(reconciler.GetUpdateStatus(instance).TLSCertUpdated()).To(Equal(false)) + Expect(reconciler.GetUpdateStatus(instance).EcertUpdated()).To(Equal(false)) + + Expect(reconciler.GetUpdateStatus(instance).MSPUpdated()).To(Equal(true)) + + _, err := reconciler.Reconcile(context.TODO(), request) + Expect(err).NotTo(HaveOccurred()) + + Expect(reconciler.GetUpdateStatus(instance).MSPUpdated()).To(Equal(false)) + }) + + }) + + Context("enrollment information changes detection", func() { + BeforeEach(func() { + oldOrderer = ¤t.IBPOrderer{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance.Name, + }, + } + + newOrderer = ¤t.IBPOrderer{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance.Name, + }, + } + + e = event.UpdateEvent{ + ObjectOld: oldOrderer, + ObjectNew: newOrderer, + } + }) + + It("returns false if new secret is nil", func() { + Expect(reconciler.UpdateFunc(e)).To(Equal(false)) + Expect(reconciler.GetUpdateStatus(instance).EcertEnroll()).To(Equal(false)) + }) + + It("returns false if new secret has ecert msp set along with enrollment inforamtion", func() { + oldOrderer.Spec.Secret = ¤t.SecretSpec{ + Enrollment: ¤t.EnrollmentSpec{ + Component: ¤t.Enrollment{ + EnrollID: "id1", + }, + }, + } + newOrderer.Spec.Secret = ¤t.SecretSpec{ + MSP: ¤t.MSPSpec{ + Component: ¤t.MSP{}, + }, + Enrollment: ¤t.EnrollmentSpec{ + Component: ¤t.Enrollment{ + EnrollID: "id2", + }, + }, + } + + newOrderer.Spec.Action = current.OrdererAction{ + Restart: true, + } + + reconciler.UpdateFunc(e) + Expect(reconciler.GetUpdateStatusAtElement(instance, 4).EcertEnroll()).To(Equal(false)) + }) + }) + + Context("update node OU", func() { + BeforeEach(func() { + oldOrderer = ¤t.IBPOrderer{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance.Name, + }, + } + + newOrderer = ¤t.IBPOrderer{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance.Name, + }, + } + newOrderer.Spec.DisableNodeOU = ¤t.BoolTrue + + e = event.UpdateEvent{ + ObjectOld: oldOrderer, + ObjectNew: newOrderer, + } + }) + + It("returns true if node ou updated in spec", func() { + reconciler.UpdateFunc(e) + Expect(reconciler.GetUpdateStatusAtElement(instance, 4).NodeOUUpdated()).To(Equal(true)) + }) + }) + }) + + Context("status updated", func() { + var ( + oldOrderer *current.IBPOrderer + newOrderer *current.IBPOrderer + e event.UpdateEvent + ) + + BeforeEach(func() { + oldOrderer = ¤t.IBPOrderer{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance.Name, + }, + Spec: current.IBPOrdererSpec{ + Images: ¤t.OrdererImages{}, + }, + } + newOrderer = ¤t.IBPOrderer{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance.Name, + }, + Spec: current.IBPOrdererSpec{ + Images: ¤t.OrdererImages{}, + }, + } + e = event.UpdateEvent{ + ObjectOld: oldOrderer, + ObjectNew: newOrderer, + } + }) + + It("does not set StatusUpdate flag if only heartbeat has changed", func() { + oldOrderer.Status.LastHeartbeatTime = time.Now().String() + newOrderer.Status.LastHeartbeatTime = time.Now().String() + + Expect(reconciler.UpdateFunc(e)).To(Equal(false)) + Expect(reconciler.GetUpdateStatus(instance).StatusUpdated()).To(Equal(false)) + }) + + It("sets StatusUpdated flag to true if status type has changed", func() { + oldOrderer.Status.Type = "old" + newOrderer.Status.Type = "new" + + Expect(reconciler.UpdateFunc(e)).To(Equal(true)) + Expect(reconciler.GetUpdateStatus(instance).StatusUpdated()).To(Equal(true)) + }) + + It("sets StatusUpdated flag to true 
if status reason has changed", func() { + oldOrderer.Status.Reason = "oldreason" + newOrderer.Status.Reason = "newreason" + + Expect(reconciler.UpdateFunc(e)).To(Equal(true)) + Expect(reconciler.GetUpdateStatus(instance).StatusUpdated()).To(Equal(true)) + }) + + It("sets StatusUpdated flag to true if status message has changed", func() { + oldOrderer.Status.Message = "oldmessage" + newOrderer.Status.Message = "newmessage" + + Expect(reconciler.UpdateFunc(e)).To(Equal(true)) + Expect(reconciler.GetUpdateStatus(instance).StatusUpdated()).To(Equal(true)) + }) + }) + + Context("append update if missing", func() { + It("appends update", func() { + updates := []Update{{tlsCertUpdated: true}} + updates = reconciler.AppendUpdateIfMissing(updates, Update{ecertUpdated: true}) + Expect(len(updates)).To(Equal(2)) + }) + + It("doesn't append update that is already in stack", func() { + updates := []Update{{tlsCertUpdated: true}} + updates = reconciler.AppendUpdateIfMissing(updates, Update{tlsCertUpdated: true}) + Expect(len(updates)).To(Equal(1)) + }) + }) + + Context("push update", func() { + It("pushes update only if missing for certificate update", func() { + reconciler.PushUpdate(instance.Name, Update{specUpdated: true}) + Expect(len(reconciler.update[instance.Name])).To(Equal(1)) + reconciler.PushUpdate(instance.Name, Update{tlsCertUpdated: true}) + Expect(len(reconciler.update[instance.Name])).To(Equal(2)) + reconciler.PushUpdate(instance.Name, Update{ecertUpdated: true}) + Expect(len(reconciler.update[instance.Name])).To(Equal(3)) + reconciler.PushUpdate(instance.Name, Update{tlsCertUpdated: true}) + Expect(len(reconciler.update[instance.Name])).To(Equal(3)) + reconciler.PushUpdate(instance.Name, Update{tlsCertUpdated: true, specUpdated: true}) + Expect(len(reconciler.update[instance.Name])).To(Equal(4)) + }) + }) + + Context("add owner reference to secret", func() { + var ( + secret *corev1.Secret + ) + + BeforeEach(func() { + secret = &corev1.Secret{} + secret.Name = "ecert-test-orderer1-signcert" + }) + + It("returns error if fails to get list of orderers", func() { + mockKubeClient.ListReturns(errors.New("list error")) + _, err := reconciler.AddOwnerReferenceToSecret(secret) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("list error")) + }) + + It("returns false if secret doesn't belong to any orderers in list", func() { + secret.Name = "tls-peer1-signcert" + added, err := reconciler.AddOwnerReferenceToSecret(secret) + Expect(err).NotTo(HaveOccurred()) + Expect(added).To(Equal(false)) + }) + + It("returns false if secret's name doesn't match expected format", func() { + secret.Name = "orderersecret" + added, err := reconciler.AddOwnerReferenceToSecret(secret) + Expect(err).NotTo(HaveOccurred()) + Expect(added).To(Equal(false)) + }) + + It("returns true if owner references added to secret", func() { + added, err := reconciler.AddOwnerReferenceToSecret(secret) + Expect(err).NotTo(HaveOccurred()) + Expect(added).To(Equal(true)) + }) + + It("returns true if owner references added to init-rootcert secret", func() { + secret.Name = "test-orderer1-init-rootcert" + added, err := reconciler.AddOwnerReferenceToSecret(secret) + Expect(err).NotTo(HaveOccurred()) + Expect(added).To(Equal(true)) + }) + }) +}) diff --git a/controllers/ibporderer/ibporderer_suite_test.go b/controllers/ibporderer/ibporderer_suite_test.go new file mode 100644 index 00000000..f0235965 --- /dev/null +++ b/controllers/ibporderer/ibporderer_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors 
to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package ibporderer_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestIbporderer(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Ibporderer Suite") +} diff --git a/controllers/ibporderer/mocks/ordererreconcile.go b/controllers/ibporderer/mocks/ordererreconcile.go new file mode 100644 index 00000000..9f06ac90 --- /dev/null +++ b/controllers/ibporderer/mocks/ordererreconcile.go @@ -0,0 +1,118 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package mocks + +import ( + "sync" + + "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + baseorderer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common" +) + +type OrdererReconcile struct { + ReconcileStub func(*v1beta1.IBPOrderer, baseorderer.Update) (common.Result, error) + reconcileMutex sync.RWMutex + reconcileArgsForCall []struct { + arg1 *v1beta1.IBPOrderer + arg2 baseorderer.Update + } + reconcileReturns struct { + result1 common.Result + result2 error + } + reconcileReturnsOnCall map[int]struct { + result1 common.Result + result2 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *OrdererReconcile) Reconcile(arg1 *v1beta1.IBPOrderer, arg2 baseorderer.Update) (common.Result, error) { + fake.reconcileMutex.Lock() + ret, specificReturn := fake.reconcileReturnsOnCall[len(fake.reconcileArgsForCall)] + fake.reconcileArgsForCall = append(fake.reconcileArgsForCall, struct { + arg1 *v1beta1.IBPOrderer + arg2 baseorderer.Update + }{arg1, arg2}) + stub := fake.ReconcileStub + fakeReturns := fake.reconcileReturns + fake.recordInvocation("Reconcile", []interface{}{arg1, arg2}) + fake.reconcileMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *OrdererReconcile) ReconcileCallCount() int { + fake.reconcileMutex.RLock() + defer fake.reconcileMutex.RUnlock() + return len(fake.reconcileArgsForCall) +} + +func (fake *OrdererReconcile) ReconcileCalls(stub func(*v1beta1.IBPOrderer, baseorderer.Update) (common.Result, error)) { + fake.reconcileMutex.Lock() + defer fake.reconcileMutex.Unlock() + fake.ReconcileStub = stub +} + +func (fake *OrdererReconcile) ReconcileArgsForCall(i int) (*v1beta1.IBPOrderer, baseorderer.Update) { + fake.reconcileMutex.RLock() + defer fake.reconcileMutex.RUnlock() + argsForCall := fake.reconcileArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *OrdererReconcile) ReconcileReturns(result1 common.Result, result2 error) { + fake.reconcileMutex.Lock() + defer fake.reconcileMutex.Unlock() + fake.ReconcileStub = nil + fake.reconcileReturns = struct { + result1 common.Result + result2 error + }{result1, result2} +} + +func (fake 
*OrdererReconcile) ReconcileReturnsOnCall(i int, result1 common.Result, result2 error) { + fake.reconcileMutex.Lock() + defer fake.reconcileMutex.Unlock() + fake.ReconcileStub = nil + if fake.reconcileReturnsOnCall == nil { + fake.reconcileReturnsOnCall = make(map[int]struct { + result1 common.Result + result2 error + }) + } + fake.reconcileReturnsOnCall[i] = struct { + result1 common.Result + result2 error + }{result1, result2} +} + +func (fake *OrdererReconcile) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.reconcileMutex.RLock() + defer fake.reconcileMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *OrdererReconcile) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} diff --git a/controllers/ibporderer/predicate.go b/controllers/ibporderer/predicate.go new file mode 100644 index 00000000..6b87d20c --- /dev/null +++ b/controllers/ibporderer/predicate.go @@ -0,0 +1,324 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package ibporderer + +import ( + "reflect" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + commoninit "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common" + "github.com/IBM-Blockchain/fabric-operator/version" +) + +type Update struct { + specUpdated bool + statusUpdated bool + overridesUpdated bool + tlsCertUpdated bool + ecertUpdated bool + ordererTagUpdated bool + restartNeeded bool + ecertReenrollNeeded bool + tlscertReenrollNeeded bool + ecertNewKeyReenroll bool + tlscertNewKeyReenroll bool + deploymentUpdated bool + mspUpdated bool + ecertEnroll bool + tlscertEnroll bool + tlsCertCreated bool + ecertCreated bool + migrateToV2 bool + migrateToV24 bool + nodeOUUpdated bool + imagesUpdated bool + fabricVersionUpdated bool + // update GetUpdateStackWithTrues when new fields are added +} + +func (u *Update) Detected() bool { + return u.specUpdated || + u.statusUpdated || + u.overridesUpdated || + u.tlsCertUpdated || + u.ecertUpdated || + u.ordererTagUpdated || + u.restartNeeded || + u.ecertReenrollNeeded || + u.tlscertReenrollNeeded || + u.ecertNewKeyReenroll || + u.tlscertNewKeyReenroll || + u.deploymentUpdated || + u.mspUpdated || + u.ecertEnroll || + u.migrateToV2 || + u.migrateToV24 || + u.nodeOUUpdated || + u.imagesUpdated || + u.fabricVersionUpdated +} + +func (u *Update) SpecUpdated() bool { + return u.specUpdated +} + +func (u *Update) StatusUpdated() bool { + return u.statusUpdated +} + +func (u *Update) ConfigOverridesUpdated() bool { + return u.overridesUpdated +} + +func (u *Update) TLSCertUpdated() bool { + return u.tlsCertUpdated +} + +func (u *Update) EcertUpdated() bool { + return u.ecertUpdated +} + +func (u *Update) OrdererTagUpdated() bool { + return u.ordererTagUpdated +} + +func (u *Update) CertificateUpdated() bool { + return u.tlsCertUpdated || u.ecertUpdated +} + +func (u *Update) GetUpdatedCertType() commoninit.SecretType { + if u.tlsCertUpdated { + return commoninit.TLS + } else if u.ecertUpdated { + return commoninit.ECERT + } + return "" +} + +func (u *Update) RestartNeeded() bool { + return u.restartNeeded +} + +func (u *Update) EcertReenrollNeeded() bool { + return u.ecertReenrollNeeded +} + +func (u *Update) TLScertReenrollNeeded() bool { + return u.tlscertReenrollNeeded +} + +func (u *Update) EcertNewKeyReenroll() bool { + return u.ecertNewKeyReenroll +} + +func (u *Update) TLScertNewKeyReenroll() bool { + return u.tlscertNewKeyReenroll +} + +func (u *Update) EcertEnroll() bool { + return u.ecertEnroll +} + +func (u *Update) TLScertEnroll() bool { + return u.tlscertEnroll +} + +func (u *Update) DeploymentUpdated() bool { + return u.deploymentUpdated +} + +func (u *Update) MSPUpdated() bool { + return u.mspUpdated +} + +func (u *Update) TLSCertCreated() bool { + return u.tlsCertCreated +} + +func (u *Update) EcertCreated() bool { + return u.ecertCreated +} + +func (u *Update) CertificateCreated() bool { + return u.tlsCertCreated || u.ecertCreated +} + +func (u *Update) GetCreatedCertType() commoninit.SecretType { + if u.tlsCertCreated { + return commoninit.TLS + } else if u.ecertCreated { + return commoninit.ECERT + } + return "" +} + +func (u *Update) CryptoBackupNeeded() bool { + return u.ecertEnroll || + u.tlscertEnroll || + u.ecertReenrollNeeded || + u.ecertNewKeyReenroll || + u.tlscertReenrollNeeded || + u.tlscertNewKeyReenroll || + u.mspUpdated +} + +func (u *Update) MigrateToV2() bool { + return u.migrateToV2 +} + +func (u *Update) MigrateToV24() bool { + return u.migrateToV24 +} + +func (u *Update) 
NodeOUUpdated() bool { + return u.nodeOUUpdated +} + +func (u *Update) GetUpdateStackWithTrues() string { + stack := "" + + if u.specUpdated { + stack += "specUpdated " + } + if u.statusUpdated { + stack += "statusUpdated " + } + if u.overridesUpdated { + stack += "overridesUpdated " + } + if u.tlsCertUpdated { + stack += "tlsCertUpdated " + } + if u.ecertUpdated { + stack += "ecertUpdated " + } + if u.ordererTagUpdated { + stack += "ordererTagUpdated " + } + if u.restartNeeded { + stack += "restartNeeded " + } + if u.ecertReenrollNeeded { + stack += "ecertReenrollNeeded " + } + if u.tlscertReenrollNeeded { + stack += "tlscertReenrollNeeded " + } + if u.ecertNewKeyReenroll { + stack += "ecertNewKeyReenroll " + } + if u.tlscertNewKeyReenroll { + stack += "tlscertNewKeyReenroll " + } + + if u.deploymentUpdated { + stack += "deploymentUpdated " + } + if u.mspUpdated { + stack += "mspUpdated " + } + if u.ecertEnroll { + stack += "ecertEnroll " + } + if u.tlscertEnroll { + stack += "tlscertEnroll " + } + if u.tlsCertCreated { + stack += "tlsCertCreated " + } + if u.ecertCreated { + stack += "ecertCreated " + } + if u.migrateToV2 { + stack += "migrateToV2 " + } + if u.migrateToV24 { + stack += "migrateToV24 " + } + if u.nodeOUUpdated { + stack += "nodeOUUpdated " + } + if u.imagesUpdated { + stack += "imagesUpdated " + } + if u.fabricVersionUpdated { + stack += "fabricVersionUpdated " + } + + if len(stack) == 0 { + stack = "emptystack " + } + + return stack +} + +// ImagesUpdated returns true if images updated +func (u *Update) ImagesUpdated() bool { + return u.imagesUpdated +} + +// FabricVersionUpdated returns true if fabric version updated +func (u *Update) FabricVersionUpdated() bool { + return u.fabricVersionUpdated +} + +func imagesUpdated(old, new *current.IBPOrderer) bool { + if new.Spec.Images != nil { + if old.Spec.Images == nil { + return true + } + + if old.Spec.Images != nil { + return !reflect.DeepEqual(old.Spec.Images, new.Spec.Images) + } + } + + return false +} + +func fabricVersionUpdated(old, new *current.IBPOrderer) bool { + return old.Spec.FabricVersion != new.Spec.FabricVersion +} + +func fabricVersionUpdatedTo149plusOr221plus(old, new *current.IBPOrderer) bool { + newVersion := version.String(new.Spec.FabricVersion) + newMajorVersion := version.GetMajorReleaseVersion(new.Spec.FabricVersion) + oldVersion := version.String(old.Spec.FabricVersion) + oldMajorVersion := version.GetMajorReleaseVersion(old.Spec.FabricVersion) + + if old.Spec.FabricVersion != new.Spec.FabricVersion { + if oldVersion == version.Unsupported || (oldMajorVersion == version.V1 && oldVersion.LessThan("1.4.9")) { + if newMajorVersion == version.V1 { + if newVersion.Equal("1.4.9") || newVersion.GreaterThan("1.4.9") { + return true + } + } else if newMajorVersion == version.V2 { + if newVersion.Equal("2.2.1") || newVersion.GreaterThan("2.2.1") { + return true + } + } + } else if oldMajorVersion == version.V2 && oldVersion.LessThan("2.2.1") { + if newVersion.Equal("2.2.1") || newVersion.GreaterThan("2.2.1") { + return true + } + } + } + + return false +} diff --git a/controllers/ibporderer/predicate_test.go b/controllers/ibporderer/predicate_test.go new file mode 100644 index 00000000..776e8b97 --- /dev/null +++ b/controllers/ibporderer/predicate_test.go @@ -0,0 +1,291 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in 
compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package ibporderer + +import ( + "context" + "encoding/json" + "fmt" + "sync" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + orderermocks "github.com/IBM-Blockchain/fabric-operator/controllers/ibporderer/mocks" + "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/orderer/v1" + config "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer/config/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + yaml "sigs.k8s.io/yaml" +) + +var _ = Describe("predicate", func() { + var ( + reconciler *ReconcileIBPOrderer + instance *current.IBPOrderer + mockKubeClient *mocks.Client + mockOrdererReconcile *orderermocks.OrdererReconcile + ) + + BeforeEach(func() { + mockKubeClient = &mocks.Client{ + ListStub: func(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) error { + switch obj.(type) { + case *corev1.NodeList: + nodeList := obj.(*corev1.NodeList) + node := corev1.Node{} + node.Labels = map[string]string{} + node.Labels["topology.kubernetes.io/zone"] = "dal" + node.Labels["topology.kubernetes.io/region"] = "us-south" + nodeList.Items = append(nodeList.Items, node) + case *current.IBPOrdererList: + ordererList := obj.(*current.IBPOrdererList) + o1 := current.IBPOrderer{} + o1.Name = "test-orderer1" + o2 := current.IBPOrderer{} + o2.Name = "test-orderer2" + o3 := current.IBPOrderer{} + o3.Name = "test-orderer2" + ordererList.Items = []current.IBPOrderer{o1, o2, o3} + } + return nil + }, + } + + mockOrdererReconcile = &orderermocks.OrdererReconcile{} + nodeNumber := 1 + instance = ¤t.IBPOrderer{ + Spec: current.IBPOrdererSpec{ + ClusterSize: 3, + NodeNumber: &nodeNumber, + }, + } + + reconciler = &ReconcileIBPOrderer{ + Offering: mockOrdererReconcile, + client: mockKubeClient, + scheme: &runtime.Scheme{}, + update: map[string][]Update{}, + mutex: &sync.Mutex{}, + } + }) + + Context("create func predicate", func() { + var ( + orderer *current.IBPOrderer + e event.CreateEvent + ) + + BeforeEach(func() { + orderer = ¤t.IBPOrderer{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance.GetName(), + }, + Status: current.IBPOrdererStatus{ + CRStatus: current.CRStatus{ + Type: current.Deployed, + }, + }, + } + e = event.CreateEvent{ + Object: orderer, + } + }) + + It("sets update flags to false if instance has status type and a create event is detected but no spec changes are detected", func() { + create := reconciler.CreateFunc(e) + Expect(create).To(Equal(true)) + + Expect(reconciler.GetUpdateStatus(instance)).To(Equal(&Update{ + specUpdated: false, + overridesUpdated: false, + })) + }) + + It("sets update flags to true if instance has status type and a create event is detected and spec changes detected", func() { + configOverride := &config.Orderer{ + Orderer: v1.Orderer{ + General: 
v1.General{ + LedgerType: "type1", + }, + }, + } + configBytes, err := json.Marshal(configOverride) + Expect(err).NotTo(HaveOccurred()) + spec := current.IBPOrdererSpec{ + ImagePullSecrets: []string{"pullsecret1"}, + ConfigOverride: &runtime.RawExtension{Raw: configBytes}, + } + binaryData, err := yaml.Marshal(spec) + Expect(err).NotTo(HaveOccurred()) + + mockKubeClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj client.Object) error { + switch obj.(type) { + case *corev1.ConfigMap: + o := obj.(*corev1.ConfigMap) + o.BinaryData = map[string][]byte{ + "spec": binaryData, + } + } + return nil + } + create := reconciler.CreateFunc(e) + Expect(create).To(Equal(true)) + + Expect(reconciler.GetUpdateStatus(instance)).To(Equal(&Update{ + specUpdated: true, + overridesUpdated: true, + })) + }) + + It("does not trigger update if instance does not have status type and a create event is detected", func() { + orderer.Status.Type = "" + + create := reconciler.CreateFunc(e) + Expect(create).To(Equal(true)) + + Expect(reconciler.GetUpdateStatus(instance)).To(Equal(&Update{})) + }) + + It("returns true but does not trigger update if new instance's name is unique to one IBPOrderer in list of IBPOrderers", func() { + orderer.Status.Type = "" + orderer.Name = "test-orderer1" + + create := reconciler.CreateFunc(e) + Expect(create).To(Equal(true)) + Expect(reconciler.GetUpdateStatus(instance)).To(Equal(&Update{})) + }) + + It("returns false if new instance's name already exists for another IBPOrderer custom resource", func() { + orderer.Status.Type = "" + orderer.Name = "test-orderer2" + + create := reconciler.CreateFunc(e) + Expect(create).To(Equal(false)) + Expect(orderer.Status.Type).To(Equal(current.Error)) + }) + + Context("secret created", func() { + var ( + cert *corev1.Secret + e event.CreateEvent + ) + + BeforeEach(func() { + cert = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + {Name: instance.Name, + Kind: "IBPOrderer"}, + }, + }, + } + e = event.CreateEvent{} + }) + + It("sets update flags to true if create event is detected for secret and secret is a TLS signcert", func() { + cert.Name = fmt.Sprintf("tls-%s-signcert", instance.Name) + e.Object = cert + create := reconciler.CreateFunc(e) + Expect(create).To(Equal(true)) + + Expect(reconciler.GetUpdateStatus(instance)).To(Equal(&Update{ + tlsCertCreated: true, + })) + }) + + It("sets update flags to true if create event is detected for secret and secret is an ecert signcert", func() { + cert.Name = fmt.Sprintf("ecert-%s-signcert", instance.Name) + e.Object = cert + create := reconciler.CreateFunc(e) + Expect(create).To(Equal(true)) + + Expect(reconciler.GetUpdateStatus(instance)).To(Equal(&Update{ + ecertCreated: true, + })) + }) + + It("does not set update flags and doesn't trigger create event if create event is detected for secret and secret is not a signcert", func() { + cert.Name = fmt.Sprintf("tls-%s-admincert", instance.Name) + e.Object = cert + create := reconciler.CreateFunc(e) + Expect(create).To(Equal(false)) + + Expect(reconciler.GetUpdateStatus(instance)).To(Equal(&Update{})) + }) + + It("does not set update flags and doesn't trigger create event if create event is detected for non-orderer secret", func() { + cert.Name = "tls-peer1-signcert" + cert.OwnerReferences = nil + e.Object = cert + create := reconciler.CreateFunc(e) + Expect(create).To(Equal(false)) + + Expect(reconciler.GetUpdateStatus(instance)).To(Equal(&Update{})) + }) + + It("does not set update flags 
if create event is detected for secret with non-orderer owner", func() { + cert.Name = "tls-peer1-signcert" + cert.OwnerReferences[0].Kind = "IBPPeer" + e.Object = cert + create := reconciler.CreateFunc(e) + Expect(create).To(Equal(true)) + + Expect(reconciler.GetUpdateStatus(instance)).To(Equal(&Update{ + tlsCertCreated: false, + })) + }) + }) + + Context("remove element", func() { + BeforeEach(func() { + reconciler.PushUpdate(instance.Name, Update{ + overridesUpdated: true, + }) + + reconciler.PushUpdate(instance.Name, Update{ + specUpdated: true, + }) + + Expect(reconciler.GetUpdateStatus(instance).ConfigOverridesUpdated()).To(Equal(true)) + Expect(reconciler.GetUpdateStatusAtElement(instance, 1).SpecUpdated()).To(Equal(true)) + }) + + It("removes top element", func() { + reconciler.PopUpdate(instance.Name) + Expect(reconciler.GetUpdateStatus(instance).ConfigOverridesUpdated()).To(Equal(false)) + Expect(reconciler.GetUpdateStatus(instance).SpecUpdated()).To(Equal(true)) + }) + + It("removing more elements than in slice should not panic", func() { + reconciler.PopUpdate(instance.Name) + reconciler.PopUpdate(instance.Name) + reconciler.PopUpdate(instance.Name) + Expect(reconciler.GetUpdateStatus(instance).SpecUpdated()).To(Equal(false)) + Expect(reconciler.GetUpdateStatus(instance).ConfigOverridesUpdated()).To(Equal(false)) + }) + }) + }) +}) diff --git a/controllers/ibppeer/ibppeer_controller.go b/controllers/ibppeer/ibppeer_controller.go new file mode 100644 index 00000000..bb11c4af --- /dev/null +++ b/controllers/ibppeer/ibppeer_controller.go @@ -0,0 +1,939 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package ibppeer + +import ( + "context" + "fmt" + "os" + "reflect" + "strings" + "sync" + "time" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + commoncontroller "github.com/IBM-Blockchain/fabric-operator/controllers/common" + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + "github.com/IBM-Blockchain/fabric-operator/pkg/global" + controllerclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering" + basepeer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/peer" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common" + k8speer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/k8s/peer" + openshiftpeer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/openshift/peer" + "github.com/IBM-Blockchain/fabric-operator/pkg/operatorerrors" + "github.com/IBM-Blockchain/fabric-operator/pkg/restart/staggerrestarts" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + "github.com/IBM-Blockchain/fabric-operator/version" + "github.com/pkg/errors" + "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + yaml "sigs.k8s.io/yaml" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + k8sclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +const ( + KIND = "IBPPeer" +) + +var log = logf.Log.WithName("controller_ibppeer") + +type CoreConfig interface { + GetMaxNameLength() *int +} + +// Add creates a new IBPPeer Controller and adds it to the Manager. The Manager will set fields on the Controller +// and Start it when the Manager is Started. 
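+//
+// An illustrative wiring sketch (assumed, not shown in this patch): the operator's
+// entry point typically creates a controller-runtime manager and an operator config,
+// then registers this controller with something like
+//
+//	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
+//	if err != nil { /* handle error */ }
+//	if err := ibppeer.Add(mgr, operatorCfg); err != nil { /* handle error */ }
+//
+// where operatorCfg is the *config.Config passed in below.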
+func Add(mgr manager.Manager, config *config.Config) error { + r, err := newReconciler(mgr, config) + if err != nil { + return err + } + return add(mgr, r) +} + +// newReconciler returns a new reconcile.Reconciler +func newReconciler(mgr manager.Manager, cfg *config.Config) (*ReconcileIBPPeer, error) { + client := controllerclient.New(mgr.GetClient(), &global.ConfigSetter{Config: cfg.Operator.Globals}) + scheme := mgr.GetScheme() + + ibppeer := &ReconcileIBPPeer{ + client: client, + scheme: scheme, + Config: cfg, + update: map[string][]Update{}, + mutex: &sync.Mutex{}, + RestartService: staggerrestarts.New(client, cfg.Operator.Restart.Timeout.Get()), + } + + restClient, err := clientset.NewForConfig(mgr.GetConfig()) + if err != nil { + return nil, err + } + + switch cfg.Offering { + case offering.K8S: + ibppeer.Offering = k8speer.New(client, scheme, cfg) + case offering.OPENSHIFT: + ibppeer.Offering = openshiftpeer.New(client, scheme, cfg, restClient) + } + + return ibppeer, nil +} + +// add adds a new Controller to mgr with r as the reconcile.Reconciler +func add(mgr manager.Manager, r *ReconcileIBPPeer) error { + // Create a new controller + predicateFuncs := predicate.Funcs{ + CreateFunc: r.CreateFunc, + UpdateFunc: r.UpdateFunc, + DeleteFunc: r.DeleteFunc, + } + + c, err := controller.New("ibppeer-controller", mgr, controller.Options{Reconciler: r}) + if err != nil { + return err + } + + // Watch for changes to primary resource IBPPeer + err = c.Watch(&source.Kind{Type: ¤t.IBPPeer{}}, &handler.EnqueueRequestForObject{}, predicateFuncs) + if err != nil { + return err + } + + // Watch for changes to config maps (Create and Update funcs handle only watching for restart config map) + err = c.Watch(&source.Kind{Type: &corev1.ConfigMap{}}, &handler.EnqueueRequestForObject{}, predicateFuncs) + if err != nil { + return err + } + + // Watch for changes to secondary resource Pods and requeue the owner IBPPeer + err = c.Watch(&source.Kind{Type: &appsv1.Deployment{}}, &handler.EnqueueRequestForOwner{ + IsController: true, + OwnerType: ¤t.IBPPeer{}, + }) + if err != nil { + return err + } + + // Watch for changes to tertiary resource Secrets and requeue the owner IBPPeer + err = c.Watch(&source.Kind{Type: &corev1.Service{}}, &handler.EnqueueRequestForOwner{ + IsController: true, + OwnerType: ¤t.IBPPeer{}, + }) + if err != nil { + return err + } + + // Watch for changes to tertiary resource Secrets and requeue the owner IBPPeer + err = c.Watch(&source.Kind{Type: &corev1.Secret{}}, &handler.EnqueueRequestForOwner{ + IsController: true, + OwnerType: ¤t.IBPPeer{}, + }, predicateFuncs) + if err != nil { + return err + } + + return nil +} + +var _ reconcile.Reconciler = &ReconcileIBPPeer{} + +//go:generate counterfeiter -o mocks/peerreconcile.go -fake-name PeerReconcile . 
peerReconcile
+
+type peerReconcile interface {
+	Reconcile(*current.IBPPeer, basepeer.Update) (common.Result, error)
+}
+
+// ReconcileIBPPeer reconciles an IBPPeer object
+type ReconcileIBPPeer struct {
+	// This client, initialized using mgr.Client() above, is a split client
+	// that reads objects from the cache and writes to the apiserver
+	client controllerclient.Client
+	scheme *runtime.Scheme
+
+	k8sSecret *corev1.Secret
+
+	Offering       peerReconcile
+	Config         *config.Config
+	RestartService *staggerrestarts.StaggerRestartsService
+
+	update map[string][]Update
+	mutex  *sync.Mutex
+}
+
+// Reconcile reads the state of the cluster for an IBPPeer object and makes changes based on the state read
+// and what is in the IBPPeer.Spec
+// Note:
+// The Controller will requeue the Request to be processed again if the returned error is non-nil or
+// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
+func (r *ReconcileIBPPeer) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
+	var err error
+
+	reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
+
+	// If peer-restart-config configmap is the object being reconciled, reconcile the
+	// restart configmap.
+	if request.Name == "peer-restart-config" {
+		requeue, err := r.ReconcileRestart(request.Namespace)
+		// Error reconciling restart - requeue the request.
+		if err != nil {
+			return reconcile.Result{}, err
+		}
+		// Restart reconciled, requeue request if required.
+		return reconcile.Result{
+			Requeue: requeue,
+		}, nil
+	}
+
+	reqLogger.Info("Reconciling IBPPeer")
+
+	// Fetch the IBPPeer instance
+	instance := &current.IBPPeer{}
+	err = r.client.Get(context.TODO(), request.NamespacedName, instance)
+	if err != nil {
+		if k8serrors.IsNotFound(err) {
+			// Request object not found, could have been deleted after reconcile request.
+			// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
+			// Return and don't requeue
+			return reconcile.Result{}, nil
+		}
+		// Error reading the object - requeue the request.
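+		// (Illustrative note, not part of the original change: returning a non-nil
+		// error here makes controller-runtime requeue the request with exponential
+		// backoff. A fixed delay could instead be requested with, for example,
+		//
+		//	return reconcile.Result{RequeueAfter: 10 * time.Second}, nil
+		//
+		// but this controller relies on the default backoff behaviour.)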
+		return reconcile.Result{}, err
+	}
+
+	var maxNameLength *int
+
+	co, err := instance.GetConfigOverride()
+	if err != nil {
+		return reconcile.Result{}, err
+	}
+
+	configOverride := co.(CoreConfig)
+	maxNameLength = configOverride.GetMaxNameLength()
+
+	err = util.ValidationChecks(instance.TypeMeta, instance.ObjectMeta, "IBPPeer", maxNameLength)
+	if err != nil {
+		return reconcile.Result{}, err
+	}
+
+	reqLogger.Info(fmt.Sprintf("Current update stack to process: %+v", GetUpdateStack(r.update)))
+
+	update := r.GetUpdateStatus(instance)
+	reqLogger.Info(fmt.Sprintf("Reconciling IBPPeer '%s' with update values of [ %+v ]", instance.GetName(), update.GetUpdateStackWithTrues()))
+
+	result, err := r.Offering.Reconcile(instance, r.PopUpdate(instance.GetName()))
+	setStatusErr := r.SetStatus(instance, result.Status, err)
+	if setStatusErr != nil {
+		return reconcile.Result{}, operatorerrors.IsBreakingError(setStatusErr, "failed to update status", log)
+	}
+
+	if err != nil {
+		return reconcile.Result{}, operatorerrors.IsBreakingError(errors.Wrapf(err, "Peer instance '%s' encountered error", instance.GetName()), "stopping reconcile loop", log)
+	}
+
+	if result.Requeue {
+		r.PushUpdate(instance.GetName(), *update)
+	}
+
+	reqLogger.Info(fmt.Sprintf("Finished reconciling IBPPeer '%s' with update values of [ %+v ]", instance.GetName(), update.GetUpdateStackWithTrues()))
+
+	// If the stack still has items that require processing, keep reconciling
+	// until the stack has been cleared
+	_, found := r.update[instance.GetName()]
+	if found {
+		if len(r.update[instance.GetName()]) > 0 {
+			return reconcile.Result{
+				Requeue: true,
+			}, nil
+		}
+	}
+
+	return result.Result, nil
+}
+
+func (r *ReconcileIBPPeer) SetStatus(instance *current.IBPPeer, reconcileStatus *current.CRStatus, reconcileErr error) error {
+	err := r.SaveSpecState(instance)
+	if err != nil {
+		return errors.Wrap(err, "failed to save spec state")
+	}
+
+	// This Get should not be required, but is: code runs between the start of reconcile and SetStatus
+	// that ends up updating the instance. Because we would still be working with the original (now
+	// outdated) version of the instance, updating it would fail with "object has been modified".
+	//
+	// TODO: The instance should only be updated at the start of reconcile (e.g. PreReconcileChecks), and if it is updated
+	// the request should be requeued and not processed.
+	// The only time the instance should be updated is in SetStatus.
+	err = r.client.Get(context.TODO(), types.NamespacedName{Name: instance.GetName(), Namespace: instance.GetNamespace()}, instance)
+	if err != nil {
+		return err
+	}
+
+	status := instance.Status.CRStatus
+
+	if reconcileErr != nil {
+		status.Type = current.Error
+		status.Status = current.True
+		status.Reason = "errorOccurredDuringReconcile"
+		status.Message = reconcileErr.Error()
+		status.LastHeartbeatTime = time.Now().String()
+		status.ErrorCode = operatorerrors.GetErrorCode(reconcileErr)
+
+		instance.Status = current.IBPPeerStatus{
+			CRStatus: status,
+		}
+
+		log.Info(fmt.Sprintf("Updating status of IBPPeer custom resource to %s phase", instance.Status.Type))
+		err = r.client.PatchStatus(context.TODO(), instance, nil, controllerclient.PatchOption{
+			Resilient: &controllerclient.ResilientPatch{
+				Retry:    2,
+				Into:     &current.IBPPeer{},
+				Strategy: k8sclient.MergeFrom,
+			},
+		})
+		if err != nil {
+			return err
+		}
+
+		return nil
+	}
+
+	status.Versions.Reconciled = instance.Spec.FabricVersion
+
+	// Check if reconcile loop returned an updated status that differs from existing status.
+	// If so, set status to the reconcile status.
+	if reconcileStatus != nil {
+		if instance.Status.Type != reconcileStatus.Type || instance.Status.Reason != reconcileStatus.Reason || instance.Status.Message != reconcileStatus.Message {
+			status.Type = reconcileStatus.Type
+			status.Status = current.True
+			status.Reason = reconcileStatus.Reason
+			status.Message = reconcileStatus.Message
+			status.LastHeartbeatTime = time.Now().String()
+
+			instance.Status = current.IBPPeerStatus{
+				CRStatus: status,
+			}
+
+			log.Info(fmt.Sprintf("Updating status of IBPPeer custom resource to %s phase", instance.Status.Type))
+			err := r.client.PatchStatus(context.TODO(), instance, nil, controllerclient.PatchOption{
+				Resilient: &controllerclient.ResilientPatch{
+					Retry:    2,
+					Into:     &current.IBPPeer{},
+					Strategy: k8sclient.MergeFrom,
+				},
+			})
+			if err != nil {
+				return err
+			}
+
+			return nil
+		}
+	}
+
+	running, err := r.GetPodStatus(instance)
+	if err != nil {
+		return err
+	}
+
+	if running {
+		if instance.Status.Type == current.Deployed || instance.Status.Type == current.Warning {
+			return nil
+		}
+		status.Type = current.Deployed
+		status.Status = current.True
+		status.Reason = "allPodsRunning"
+	} else {
+		if instance.Status.Type == current.Deploying {
+			return nil
+		}
+		status.Type = current.Deploying
+		status.Status = current.True
+		status.Reason = "waitingForPods"
+	}
+
+	instance.Status = current.IBPPeerStatus{
+		CRStatus: status,
+	}
+	instance.Status.LastHeartbeatTime = time.Now().String()
+	log.Info(fmt.Sprintf("Updating status of IBPPeer custom resource to %s phase", instance.Status.Type))
+	err = r.client.PatchStatus(context.TODO(), instance, nil, controllerclient.PatchOption{
+		Resilient: &controllerclient.ResilientPatch{
+			Retry:    2,
+			Into:     &current.IBPPeer{},
+			Strategy: k8sclient.MergeFrom,
+		},
+	})
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *ReconcileIBPPeer) SaveSpecState(instance *current.IBPPeer) error {
+	data, err := yaml.Marshal(instance.Spec)
+	if err != nil {
+		return err
+	}
+
+	cm := &corev1.ConfigMap{
+		ObjectMeta: v1.ObjectMeta{
+			Name:      fmt.Sprintf("%s-spec", instance.GetName()),
+			Namespace: instance.GetNamespace(),
+			Labels:    instance.GetLabels(),
+		},
+		BinaryData: map[string][]byte{
+			"spec": data,
+		},
+	}
+
+	err = r.client.CreateOrUpdate(context.TODO(), cm, controllerclient.CreateOrUpdateOption{Owner: instance, Scheme:
r.scheme}) + if err != nil { + return err + } + + return nil +} + +func (r *ReconcileIBPPeer) GetSpecState(instance *current.IBPPeer) (*corev1.ConfigMap, error) { + cm := &corev1.ConfigMap{} + nn := types.NamespacedName{ + Name: fmt.Sprintf("%s-spec", instance.GetName()), + Namespace: instance.GetNamespace(), + } + + err := r.client.Get(context.TODO(), nn, cm) + if err != nil { + return nil, err + } + + return cm, nil +} + +func (r *ReconcileIBPPeer) GetPodStatus(instance *current.IBPPeer) (bool, error) { + labelSelector, err := labels.Parse(fmt.Sprintf("app=%s", instance.GetName())) + if err != nil { + return false, errors.Wrap(err, "failed to parse label selector for app name") + } + + listOptions := &client.ListOptions{ + LabelSelector: labelSelector, + Namespace: instance.GetNamespace(), + } + + podList := &corev1.PodList{} + err = r.client.List(context.TODO(), podList, listOptions) + if err != nil { + return false, err + } + + if len(podList.Items) == 0 { + return false, nil + } + + for _, pod := range podList.Items { + if pod.Status.Phase != corev1.PodRunning { + return false, nil + } + } + + return true, nil +} + +func (r *ReconcileIBPPeer) getIgnoreDiffs() []string { + return []string{ + `Template\.Spec\.Containers\.slice\[\d\]\.Resources\.Requests\.map\[memory\].s`, + `Template\.Spec\.InitContainers\.slice\[\d\]\.Resources\.Requests\.map\[memory\].s`, + `Ports\.slice\[\d\]\.Protocol`, + } +} + +func (r *ReconcileIBPPeer) getSelectorLabels(instance *current.IBPPeer) map[string]string { + label := os.Getenv("OPERATOR_LABEL_PREFIX") + if label == "" { + label = "fabric" + } + + return map[string]string{ + "app": instance.Name, + "creator": label, + "orgname": instance.Spec.MSPID, + "release": "operator", + "helm.sh/chart": "ibm-" + label, + "app.kubernetes.io/name": label, + "app.kubernetes.io/instance": label + "peer", + "app.kubernetes.io/managed-by": label + "-operator", + } +} + +func (r *ReconcileIBPPeer) CreateFunc(e event.CreateEvent) bool { + update := Update{} + + switch e.Object.(type) { + case *current.IBPPeer: + peer := e.Object.(*current.IBPPeer) + log.Info(fmt.Sprintf("Create event detected for peer '%s'", peer.GetName())) + + if peer.Status.HasType() { + cm, err := r.GetSpecState(peer) + if err != nil { + log.Info(fmt.Sprintf("Failed getting saved peer spec '%s', can't perform update checks, triggering reconcile: %s", peer.GetName(), err.Error())) + return true + } + + specBytes := cm.BinaryData["spec"] + savedPeer := ¤t.IBPPeer{} + + err = yaml.Unmarshal(specBytes, &savedPeer.Spec) + if err != nil { + log.Info(fmt.Sprintf("Unmarshal failed for saved peer spec '%s', can't perform update checks, triggering reconcile: %s", peer.GetName(), err.Error())) + return true + } + + if !reflect.DeepEqual(peer.Spec, savedPeer.Spec) { + log.Info(fmt.Sprintf("IBPPeer '%s' spec was updated while operator was down", peer.GetName())) + update.specUpdated = true + } + + if !reflect.DeepEqual(peer.Spec.ConfigOverride, savedPeer.Spec.ConfigOverride) { + log.Info(fmt.Sprintf("IBPPeer '%s' overrides were updated while operator was down", peer.GetName())) + update.overridesUpdated = true + } + + update.imagesUpdated = imagesUpdated(savedPeer, peer) + update.fabricVersionUpdated = fabricVersionUpdated(savedPeer, peer) + + log.Info(fmt.Sprintf("Create event triggering reconcile for updating peer '%s'", peer.GetName())) + r.PushUpdate(peer.GetName(), update) + return true + } + + // If creating resource for the first time, check that a unique name is provided + err := 
commoncontroller.ValidateCRName(r.client, peer.Name, peer.Namespace, commoncontroller.IBPPEER) + if err != nil { + log.Error(err, "failed to validate peer name") + operror := operatorerrors.Wrap(err, operatorerrors.InvalidCustomResourceCreateRequest, "failed to validate custom resource name") + err = r.SetStatus(peer, nil, operror) + if err != nil { + log.Error(err, "failed to set status to error", "peer.name", peer.Name, "error", "InvalidCustomResourceCreateRequest") + } + return false + } + + log.Info(fmt.Sprintf("Create event triggering reconcile for creating peer '%s'", peer.GetName())) + + case *corev1.Secret: + secret := e.Object.(*corev1.Secret) + + if secret.OwnerReferences == nil || len(secret.OwnerReferences) == 0 { + isPeerSecret, err := r.AddOwnerReferenceToSecret(secret) + if err != nil || !isPeerSecret { + return false + } + } + + if secret.OwnerReferences[0].Kind == KIND { + log.Info(fmt.Sprintf("Create event detected for secret '%s'", secret.GetName())) + instanceName := secret.OwnerReferences[0].Name + + if util.IsSecretTLSCert(secret.Name) { + update.tlsCertCreated = true + log.Info(fmt.Sprintf("TLS cert create detected on IBPPeer custom resource %s", instanceName)) + } else if util.IsSecretEcert(secret.Name) { + update.ecertCreated = true + log.Info(fmt.Sprintf("Ecert create detected on IBPPeer custom resource %s", instanceName)) + } else { + return false + } + + log.Info(fmt.Sprintf("Peer crypto create triggering reconcile on IBPPeer custom resource %s: update [ %+v ]", instanceName, update.GetUpdateStackWithTrues())) + r.PushUpdate(instanceName, update) + } + + case *appsv1.Deployment: + dep := e.Object.(*appsv1.Deployment) + log.Info(fmt.Sprintf("Create event detected by IBPPeer controller for deployment '%s', triggering reconcile", dep.GetName())) + case *corev1.ConfigMap: + cm := e.Object.(*corev1.ConfigMap) + if cm.Name == "peer-restart-config" { + log.Info(fmt.Sprintf("Create event detected by IBPPeer contoller for config map '%s', triggering restart reconcile", cm.GetName())) + } else { + return false + } + + } + + return true +} + +func (r *ReconcileIBPPeer) UpdateFunc(e event.UpdateEvent) bool { + update := Update{} + + switch e.ObjectOld.(type) { + case *current.IBPPeer: + oldPeer := e.ObjectOld.(*current.IBPPeer) + newPeer := e.ObjectNew.(*current.IBPPeer) + log.Info(fmt.Sprintf("Update event detected for peer '%s'", oldPeer.GetName())) + + if util.CheckIfZoneOrRegionUpdated(oldPeer.Spec.Zone, newPeer.Spec.Zone) { + log.Error(errors.New("Zone update is not allowed"), "invalid spec update") + return false + } + + if util.CheckIfZoneOrRegionUpdated(oldPeer.Spec.Region, newPeer.Spec.Region) { + log.Error(errors.New("Region update is not allowed"), "invalid spec update") + return false + } + + if reflect.DeepEqual(oldPeer.Spec, newPeer.Spec) { + return false + } + log.Info(fmt.Sprintf("%s spec updated", oldPeer.GetName())) + update.specUpdated = true + + // Check for changes to peer tag to determine if any migration logic needs to be executed + // from old peer version to new peer version + if oldPeer.Spec.Images != nil && newPeer.Spec.Images != nil { + if oldPeer.Spec.Images.PeerTag != newPeer.Spec.Images.PeerTag { + log.Info(fmt.Sprintf("Peer tag update from %s to %s", oldPeer.Spec.Images.PeerTag, newPeer.Spec.Images.PeerTag)) + update.peerTagUpdated = true + } + } + + if !reflect.DeepEqual(oldPeer.Spec.ConfigOverride, newPeer.Spec.ConfigOverride) { + log.Info(fmt.Sprintf("%s config override updated", oldPeer.GetName())) + update.overridesUpdated = true + } + + 
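+ // Illustrative sketch of how the remaining checks in this case map spec
+ // deltas to Update flags (the version strings below are hypothetical
+ // examples, not values from this patch):
+ //   Spec.FabricVersion "1.4.9-1" -> "2.4.1-1" sets migrateToV2 and migrateToV24
+ //   Spec.FabricVersion "2.2.5-1" -> "2.4.1-1" sets migrateToV24 only
+ //   Spec.Action.Restart == true sets restartNeeded
+ // The accumulated Update is then queued with PushUpdate and consumed on the
+ // next reconcile of this IBPPeer.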
update.mspUpdated = commoncontroller.MSPInfoUpdateDetected(oldPeer.Spec.Secret, newPeer.Spec.Secret) + + if newPeer.Spec.Action.Restart == true { + update.restartNeeded = true + } + + if oldPeer.Spec.Action.Reenroll.Ecert != newPeer.Spec.Action.Reenroll.Ecert { + update.ecertReenrollNeeded = newPeer.Spec.Action.Reenroll.Ecert + } + + if oldPeer.Spec.Action.Reenroll.TLSCert != newPeer.Spec.Action.Reenroll.TLSCert { + update.tlsReenrollNeeded = newPeer.Spec.Action.Reenroll.TLSCert + } + + if oldPeer.Spec.Action.Reenroll.EcertNewKey != newPeer.Spec.Action.Reenroll.EcertNewKey { + update.ecertNewKeyReenroll = newPeer.Spec.Action.Reenroll.EcertNewKey + } + + if oldPeer.Spec.Action.Reenroll.TLSCertNewKey != newPeer.Spec.Action.Reenroll.TLSCertNewKey { + update.tlscertNewKeyReenroll = newPeer.Spec.Action.Reenroll.TLSCertNewKey + } + + oldVer := version.String(oldPeer.Spec.FabricVersion) + newVer := version.String(newPeer.Spec.FabricVersion) + + // check if this V1 -> V2.2.x / V2.4.x peer migration + if (oldPeer.Spec.FabricVersion == "" || + version.GetMajorReleaseVersion(oldPeer.Spec.FabricVersion) == version.V1) && + version.GetMajorReleaseVersion(newPeer.Spec.FabricVersion) == version.V2 { + update.migrateToV2 = true + if newVer.EqualWithoutTag(version.V2_4_1) || newVer.GreaterThan(version.V2_4_1) { + update.migrateToV24 = true + } + } + + // check if this V2.2.x -> V2.4.x peer migration + if (version.GetMajorReleaseVersion(oldPeer.Spec.FabricVersion) == version.V2) && + oldVer.LessThan(version.V2_4_1) && + (newVer.EqualWithoutTag(version.V2_4_1) || newVer.GreaterThan(version.V2_4_1)) { + update.migrateToV24 = true + } + + if newPeer.Spec.Action.UpgradeDBs == true { + update.upgradedbs = true + } + + if newPeer.Spec.Action.Enroll.Ecert == true { + update.ecertEnroll = true + } + + if newPeer.Spec.Action.Enroll.TLSCert == true { + update.tlscertEnroll = true + } + + if oldPeer.Spec.NodeOUDisabled() != newPeer.Spec.NodeOUDisabled() { + update.nodeOUUpdated = true + } + + // if use updates NumSecondsWarningPeriod field once we have already run the reconcile + // we need to retrigger the timer logic + if oldPeer.Spec.NumSecondsWarningPeriod != newPeer.Spec.NumSecondsWarningPeriod { + update.ecertUpdated = true + update.tlsCertUpdated = true + log.Info(fmt.Sprintf("%s NumSecondsWarningPeriod updated", oldPeer.Name)) + } + + update.imagesUpdated = imagesUpdated(oldPeer, newPeer) + update.fabricVersionUpdated = fabricVersionUpdated(oldPeer, newPeer) + + log.Info(fmt.Sprintf("Spec update triggering reconcile on IBPPeer custom resource %s, update [ %+v ]", oldPeer.Name, update.GetUpdateStackWithTrues())) + r.PushUpdate(oldPeer.Name, update) + return true + + case *corev1.Secret: + oldSecret := e.ObjectOld.(*corev1.Secret) + newSecret := e.ObjectNew.(*corev1.Secret) + + if oldSecret.OwnerReferences == nil || len(oldSecret.OwnerReferences) == 0 { + isPeerSecret, err := r.AddOwnerReferenceToSecret(oldSecret) + if err != nil || !isPeerSecret { + return false + } + } + + if oldSecret.OwnerReferences[0].Kind == KIND { + if reflect.DeepEqual(oldSecret.Data, newSecret.Data) { + return false + } + + log.Info(fmt.Sprintf("Update event detected on secret '%s'", oldSecret.GetName())) + instanceName := oldSecret.OwnerReferences[0].Name + if util.IsSecretTLSCert(oldSecret.Name) { + update.tlsCertUpdated = true + log.Info(fmt.Sprintf("TLS cert update detected on IBPPeer custom resource %s", instanceName)) + } else if util.IsSecretEcert(oldSecret.Name) { + update.ecertUpdated = true + log.Info(fmt.Sprintf("ecert update 
detected on IBPPeer custom resource %s", instanceName)) + } else { + return false + } + + log.Info(fmt.Sprintf("Peer crypto update triggering reconcile on IBPPeer custom resource %s: update [ %+v ]", instanceName, update.GetUpdateStackWithTrues())) + r.PushUpdate(instanceName, update) + return true + } + + case *appsv1.Deployment: + oldDeployment := e.ObjectOld.(*appsv1.Deployment) + log.Info(fmt.Sprintf("Spec update detected by IBPPeer controller on deployment '%s'", oldDeployment.GetName())) + + case *corev1.ConfigMap: + cm := e.ObjectOld.(*corev1.ConfigMap) + if cm.Name == "peer-restart-config" { + log.Info("Update event detected for peer-restart-config, triggering restart reconcile") + return true + } + + } + + return false +} + +// DeleteFunc will perform any necessary clean up, such as removing artificates that were +// left dangling after the deletion of the peer resource +func (r *ReconcileIBPPeer) DeleteFunc(e event.DeleteEvent) bool { + switch e.Object.(type) { + case *current.IBPPeer: + peer := e.Object.(*current.IBPPeer) + log.Info(fmt.Sprintf("Peer (%s) deleted", peer.GetName())) + + // Deleting this config map manually, in 2.5.1 release of operator this config map was created + // without proper controller references set and was not cleaned up on peer resource deletion. + log.Info(fmt.Sprintf("Deleting %s-init-config config map, if found", peer.GetName())) + if err := r.client.Delete(context.TODO(), &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-init-config", peer.GetName()), + Namespace: peer.GetNamespace(), + }, + }); client.IgnoreNotFound(err) != nil { + log.Info(fmt.Sprintf("failed to delete config map: %s", err)) + } + + case *appsv1.Deployment: + dep := e.Object.(*appsv1.Deployment) + log.Info(fmt.Sprintf("Delete detected by IBPPeer controller on deployment '%s'", dep.GetName())) + case *corev1.Secret: + secret := e.Object.(*corev1.Secret) + log.Info(fmt.Sprintf("Delete detected by IBPPeer controller on secret '%s'", secret.GetName())) + case *corev1.ConfigMap: + cm := e.Object.(*corev1.ConfigMap) + log.Info(fmt.Sprintf("Delete detected by IBPPeer controller on configmap '%s'", cm.GetName())) + + } + + return true +} + +func (r *ReconcileIBPPeer) GetUpdateStatusAtElement(instance *current.IBPPeer, index int) *Update { + r.mutex.Lock() + defer r.mutex.Unlock() + + update := Update{} + _, ok := r.update[instance.GetName()] + if !ok { + return &update + } + + if len(r.update[instance.GetName()]) >= 1 { + update = r.update[instance.GetName()][index] + } + + return &update +} + +func (r *ReconcileIBPPeer) GetUpdateStatus(instance *current.IBPPeer) *Update { + return r.GetUpdateStatusAtElement(instance, 0) +} + +func (r *ReconcileIBPPeer) PushUpdate(instanceName string, update Update) { + r.mutex.Lock() + defer r.mutex.Unlock() + + r.update[instanceName] = r.AppendUpdateIfMissing(r.update[instanceName], update) +} + +func (r *ReconcileIBPPeer) PopUpdate(instanceName string) *Update { + r.mutex.Lock() + defer r.mutex.Unlock() + + update := Update{} + if len(r.update[instanceName]) >= 1 { + update = r.update[instanceName][0] + if len(r.update[instanceName]) == 1 { + r.update[instanceName] = []Update{} + } else { + r.update[instanceName] = r.update[instanceName][1:] + } + } + + return &update +} + +func (r *ReconcileIBPPeer) AppendUpdateIfMissing(updates []Update, update Update) []Update { + for _, u := range updates { + if u == update { + return updates + } + } + return append(updates, update) +} + +func (r *ReconcileIBPPeer) 
AddOwnerReferenceToSecret(secret *corev1.Secret) (bool, error) { + // Peer secrets we are looking to add owner references to are named: + // -- + // -init-rootcert + + // The following secrets are created by operator, and will have owner references: + // -genesis + // -crypto-backup + // -secret + + items := strings.Split(secret.Name, "-") + if len(items) < 3 { + // Secret names we are looking for will be split into at least 3 strings: + // [prefix, instance name, type] OR [instance name, "init", "rootcert"] + return false, nil + } + + // Account for the case where the instance's name is hyphenated + var instanceName string + if strings.Contains(secret.Name, "-init-rootcert") { + instanceName = strings.Join(items[:len(items)-2], "-") // instance name contains all but last 2 items + } else { + instanceName = strings.Join(items[1:len(items)-1], "-") // instance name contains all but first and last item + } + + listOptions := &client.ListOptions{ + Namespace: secret.Namespace, + } + + peerList := ¤t.IBPPeerList{} + err := r.client.List(context.TODO(), peerList, listOptions) + if err != nil { + return false, errors.Wrap(err, "failed to get list of peers") + } + + for _, o := range peerList.Items { + peer := o + if peer.Name == instanceName { + // Instance 'i' found in list of orderers + err := r.client.Update(context.TODO(), secret, controllerclient.UpdateOption{ + Owner: &peer, + Scheme: r.scheme, + }) + if err != nil { + return false, err + } + return true, nil + } + } + + return false, nil +} + +func (r *ReconcileIBPPeer) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(¤t.IBPPeer{}). + Complete(r) +} + +func GetUpdateStack(allUpdates map[string][]Update) string { + stack := "" + + for orderer, updates := range allUpdates { + currentStack := "" + for index, update := range updates { + currentStack += fmt.Sprintf("{ %s}", update.GetUpdateStackWithTrues()) + if index != len(updates)-1 { + currentStack += " , " + } + } + stack += fmt.Sprintf("%s: [ %s ] ", orderer, currentStack) + } + + return stack +} + +func (r *ReconcileIBPPeer) ReconcileRestart(namespace string) (bool, error) { + requeue, err := r.RestartService.Reconcile("peer", namespace) + if err != nil { + log.Error(err, "failed to reconcile restart queues in peer-restart-config") + return false, err + } + + return requeue, nil +} diff --git a/controllers/ibppeer/ibppeer_controller_test.go b/controllers/ibppeer/ibppeer_controller_test.go new file mode 100644 index 00000000..ee5ab516 --- /dev/null +++ b/controllers/ibppeer/ibppeer_controller_test.go @@ -0,0 +1,919 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package ibppeer + +import ( + "context" + "errors" + "fmt" + "sync" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + peermocks "github.com/IBM-Blockchain/fabric-operator/controllers/ibppeer/mocks" + "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/operatorerrors" + corev1 "k8s.io/api/core/v1" + k8serror "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + yaml "sigs.k8s.io/yaml" +) + +var _ = Describe("ReconcileIBPPeer", func() { + var ( + reconciler *ReconcileIBPPeer + request reconcile.Request + mockKubeClient *mocks.Client + mockPeerReconcile *peermocks.PeerReconcile + instance *current.IBPPeer + ) + + BeforeEach(func() { + mockKubeClient = &mocks.Client{} + mockPeerReconcile = &peermocks.PeerReconcile{} + instance = ¤t.IBPPeer{ + Spec: current.IBPPeerSpec{ + Images: ¤t.PeerImages{ + PeerTag: "1.4.9-2511004", + }, + }, + } + instance.Name = "test-peer" + + mockKubeClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj client.Object) error { + switch obj.(type) { + case *current.IBPPeer: + o := obj.(*current.IBPPeer) + o.Kind = "IBPPeer" + o.Name = instance.Name + + instance = o + case *corev1.Service: + o := obj.(*corev1.Service) + o.Spec.Type = corev1.ServiceTypeNodePort + o.Spec.Ports = append(o.Spec.Ports, corev1.ServicePort{ + Name: "peer-api", + TargetPort: intstr.IntOrString{ + IntVal: 7051, + }, + NodePort: int32(7051), + }) + } + return nil + } + + mockKubeClient.UpdateStatusStub = func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + switch obj.(type) { + case *current.IBPPeer: + o := obj.(*current.IBPPeer) + instance = o + } + return nil + } + + mockKubeClient.ListStub = func(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) error { + switch obj.(type) { + case *corev1.NodeList: + nodeList := obj.(*corev1.NodeList) + node := corev1.Node{} + node.Labels = map[string]string{} + node.Labels["topology.kubernetes.io/zone"] = "dal" + node.Labels["topology.kubernetes.io/region"] = "us-south" + nodeList.Items = append(nodeList.Items, node) + case *current.IBPPeerList: + peerList := obj.(*current.IBPPeerList) + p1 := current.IBPPeer{} + p1.Name = "test-peer1" + p2 := current.IBPPeer{} + p2.Name = "test-peer2" + p3 := current.IBPPeer{} + p3.Name = "test-peer2" + peerList.Items = []current.IBPPeer{p1, p2, p3} + } + return nil + } + + reconciler = &ReconcileIBPPeer{ + Offering: mockPeerReconcile, + client: mockKubeClient, + scheme: &runtime.Scheme{}, + update: map[string][]Update{}, + mutex: &sync.Mutex{}, + } + request = reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: "test-namespace", + Name: "test", + }, + } + }) + + Context("Reconciles", func() { + It("does not return an error if the custom resource is 'not found'", func() { + notFoundErr := &k8serror.StatusError{ + ErrStatus: metav1.Status{ + Reason: metav1.StatusReasonNotFound, + }, + } + mockKubeClient.GetReturns(notFoundErr) + _, err := reconciler.Reconcile(context.TODO(), request) + Expect(err).NotTo(HaveOccurred()) + }) + + It("returns an error if the request to get custom resource return any other error besides 'not found'", func() { + alreadyExistsErr 
:= &k8serror.StatusError{ + ErrStatus: metav1.Status{ + Message: "already exists", + Reason: metav1.StatusReasonAlreadyExists, + }, + } + mockKubeClient.GetReturns(alreadyExistsErr) + _, err := reconciler.Reconcile(context.TODO(), request) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("already exists")) + }) + + It("returns an error if it encountered a non-breaking error", func() { + errMsg := "failed to reconcile deployment encountered breaking error" + mockPeerReconcile.ReconcileReturns(common.Result{}, errors.New(errMsg)) + _, err := reconciler.Reconcile(context.TODO(), request) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(fmt.Sprintf("Peer instance '%s' encountered error: %s", instance.Name, errMsg))) + }) + + It("does not return an error if it encountered a breaking error", func() { + mockPeerReconcile.ReconcileReturns(common.Result{}, operatorerrors.New(operatorerrors.InvalidDeploymentCreateRequest, "failed to reconcile deployment encountered breaking error")) + _, err := reconciler.Reconcile(context.TODO(), request) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("update reconcile", func() { + var ( + oldPeer *current.IBPPeer + newPeer *current.IBPPeer + oldSecret *corev1.Secret + newSecret *corev1.Secret + e event.UpdateEvent + ) + + BeforeEach(func() { + + configoverride := []byte(`{"peer": {"id": "peer1"} }`) + + oldPeer = ¤t.IBPPeer{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance.Name, + }, + Spec: current.IBPPeerSpec{ + Images: ¤t.PeerImages{ + PeerTag: "1.4.6-20200101", + }, + ConfigOverride: &runtime.RawExtension{Raw: configoverride}, + }, + } + + configoverride2 := []byte(`{"peer": {"id": "peer2"} }`) + + newPeer = ¤t.IBPPeer{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance.Name, + }, + Spec: current.IBPPeerSpec{ + Images: ¤t.PeerImages{ + PeerTag: "1.4.9-2511004", + }, + ConfigOverride: &runtime.RawExtension{Raw: configoverride2}, + }, + } + + e = event.UpdateEvent{ + ObjectOld: oldPeer, + ObjectNew: newPeer, + } + + Expect(reconciler.UpdateFunc(e)).To(Equal(true)) + + oldPeer = ¤t.IBPPeer{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance.Name, + }, + Spec: current.IBPPeerSpec{ + Images: ¤t.PeerImages{ + PeerTag: "1.4.6-20200101", + }, + MSPID: "old-mspid", + }, + } + + newPeer = ¤t.IBPPeer{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance.Name, + }, + Spec: current.IBPPeerSpec{ + Images: ¤t.PeerImages{ + PeerTag: "1.4.9-2511004", + }, + MSPID: "new-mspid", + }, + } + + e = event.UpdateEvent{ + ObjectOld: oldPeer, + ObjectNew: newPeer, + } + + Expect(reconciler.UpdateFunc(e)).To(Equal(true)) + + oldSecret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("tls-%s-signcert", instance.Name), + OwnerReferences: []metav1.OwnerReference{ + { + Name: instance.Name, + Kind: "IBPPeer", + }, + }, + }, + } + + newSecret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("tls-%s-signcert", instance.Name), + OwnerReferences: []metav1.OwnerReference{ + { + Name: instance.Name, + Kind: "IBPPeer", + }, + }, + }, + } + + e = event.UpdateEvent{ + ObjectOld: oldSecret, + ObjectNew: newSecret, + } + + Expect(reconciler.UpdateFunc(e)).To(Equal(false)) + + oldSecret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("ecert-%s-signcert", instance.Name), + OwnerReferences: []metav1.OwnerReference{ + { + Name: instance.Name, + Kind: "IBPPeer", + }, + }, + }, + Data: map[string][]byte{ + "test": []byte("data"), + }, + } + + newSecret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + 
Name: fmt.Sprintf("ecert-%s-signcert", instance.Name), + OwnerReferences: []metav1.OwnerReference{ + { + Name: instance.Name, + Kind: "IBPPeer", + }, + }, + }, + Data: map[string][]byte{ + "test": []byte("newdata"), + }, + } + + e = event.UpdateEvent{ + ObjectOld: oldSecret, + ObjectNew: newSecret, + } + + Expect(reconciler.UpdateFunc(e)).To(Equal(true)) + + oldSecret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("tls-%s-admincerts", instance.Name), + OwnerReferences: []metav1.OwnerReference{ + { + Name: instance.Name, + Kind: "IBPPeer", + }, + }, + }, + } + + newSecret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("tls-%s-admincerts", instance.Name), + OwnerReferences: []metav1.OwnerReference{ + { + Name: instance.Name, + Kind: "IBPPeer", + }, + }, + }, + } + + e = event.UpdateEvent{ + ObjectOld: oldSecret, + ObjectNew: newSecret, + } + + Expect(reconciler.UpdateFunc(e)).To(Equal(false)) + + oldPeer = ¤t.IBPPeer{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance.Name, + }, + Spec: current.IBPPeerSpec{ + Secret: ¤t.SecretSpec{ + MSP: ¤t.MSPSpec{ + Component: ¤t.MSP{ + SignCerts: "testcert", + }, + }, + }, + }, + } + + newPeer = ¤t.IBPPeer{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance.Name, + }, + Spec: current.IBPPeerSpec{ + Secret: ¤t.SecretSpec{ + MSP: ¤t.MSPSpec{ + TLS: ¤t.MSP{ + SignCerts: "testcert", + }, + }, + }, + }, + } + + e = event.UpdateEvent{ + ObjectOld: oldPeer, + ObjectNew: newPeer, + } + + Expect(reconciler.UpdateFunc(e)).To(Equal(true)) + }) + + It("properly pops update flags from stack", func() { + By("popping first update - config overrides", func() { + Expect(reconciler.GetUpdateStatus(instance).overridesUpdated).To(Equal(true)) + Expect(reconciler.GetUpdateStatus(instance).peerTagUpdated).To(Equal(true)) + + _, err := reconciler.Reconcile(context.TODO(), request) + Expect(err).NotTo(HaveOccurred()) + + }) + + By("popping second update - spec updated", func() { + Expect(reconciler.GetUpdateStatus(instance).overridesUpdated).To(Equal(false)) + Expect(reconciler.GetUpdateStatus(instance).specUpdated).To(Equal(true)) + + _, err := reconciler.Reconcile(context.TODO(), request) + Expect(err).NotTo(HaveOccurred()) + }) + + By("popping third update - ecert updated", func() { + Expect(reconciler.GetUpdateStatus(instance).tlsCertUpdated).To(Equal(false)) + Expect(reconciler.GetUpdateStatus(instance).ecertUpdated).To(Equal(true)) + + _, err := reconciler.Reconcile(context.TODO(), request) + Expect(err).NotTo(HaveOccurred()) + }) + + By("popping fourth update - msp updated", func() { + Expect(reconciler.GetUpdateStatus(instance).tlsCertUpdated).To(Equal(false)) + Expect(reconciler.GetUpdateStatus(instance).ecertUpdated).To(Equal(false)) + + Expect(reconciler.GetUpdateStatus(instance).mspUpdated).To(Equal(true)) + + _, err := reconciler.Reconcile(context.TODO(), request) + Expect(err).NotTo(HaveOccurred()) + + Expect(reconciler.GetUpdateStatus(instance).mspUpdated).To(Equal(false)) + }) + + }) + + Context("num seconds warning period updated", func() { + BeforeEach(func() { + oldPeer = ¤t.IBPPeer{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance.Name, + }, + Spec: current.IBPPeerSpec{ + NumSecondsWarningPeriod: 10, + }, + } + + newPeer = ¤t.IBPPeer{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance.Name, + }, + Spec: current.IBPPeerSpec{ + NumSecondsWarningPeriod: 20, + }, + } + + e = event.UpdateEvent{ + ObjectOld: oldPeer, + ObjectNew: newPeer, + } + + Expect(reconciler.UpdateFunc(e)).To(Equal(true)) + }) + + It("returns 
true if numSecondsWarningPeriod changed", func() { + Expect(reconciler.GetUpdateStatusAtElement(instance, 4).TLSCertUpdated()).To(Equal(true)) + Expect(reconciler.GetUpdateStatusAtElement(instance, 4).EcertUpdated()).To(Equal(true)) + + _, err := reconciler.Reconcile(context.TODO(), request) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("enrollment information changes detection", func() { + BeforeEach(func() { + oldPeer = ¤t.IBPPeer{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance.Name, + }, + } + + newPeer = ¤t.IBPPeer{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance.Name, + }, + } + + e = event.UpdateEvent{ + ObjectOld: oldPeer, + ObjectNew: newPeer, + } + }) + + Context("ecert", func() { + It("returns false if new secret is nil", func() { + Expect(reconciler.UpdateFunc(e)).To(Equal(false)) + Expect(reconciler.GetUpdateStatus(instance).EcertEnroll()).To(Equal(false)) + }) + + It("returns false if new secret has ecert msp set along with enrollment inforamtion", func() { + oldPeer.Spec.Secret = ¤t.SecretSpec{ + Enrollment: ¤t.EnrollmentSpec{ + Component: ¤t.Enrollment{ + EnrollID: "id1", + }, + }, + } + newPeer.Spec.Secret = ¤t.SecretSpec{ + MSP: ¤t.MSPSpec{ + Component: ¤t.MSP{}, + }, + Enrollment: ¤t.EnrollmentSpec{ + Component: ¤t.Enrollment{ + EnrollID: "id2", + }, + }, + } + + newPeer.Spec.Action = current.PeerAction{ + Restart: true, + } + + reconciler.UpdateFunc(e) + Expect(reconciler.GetUpdateStatusAtElement(instance, 4).EcertEnroll()).To(Equal(false)) + }) + }) + + Context("TLS", func() { + It("returns false if new secret is nil", func() { + Expect(reconciler.UpdateFunc(e)).To(Equal(false)) + Expect(reconciler.GetUpdateStatus(instance).EcertEnroll()).To(Equal(false)) + }) + + It("returns false if new secret has TLS msp set along with enrollment inforamtion", func() { + oldPeer.Spec.Secret = ¤t.SecretSpec{ + Enrollment: ¤t.EnrollmentSpec{ + Component: ¤t.Enrollment{ + EnrollID: "id1", + }, + }, + } + newPeer.Spec.Secret = ¤t.SecretSpec{ + MSP: ¤t.MSPSpec{ + Component: ¤t.MSP{}, + }, + Enrollment: ¤t.EnrollmentSpec{ + Component: ¤t.Enrollment{ + EnrollID: "id2", + }, + }, + } + + newPeer.Spec.Action = current.PeerAction{ + Restart: true, + } + + reconciler.UpdateFunc(e) + Expect(reconciler.GetUpdateStatusAtElement(instance, 4).EcertEnroll()).To(Equal(false)) + }) + }) + }) + + Context("detect MSP updates", func() { + BeforeEach(func() { + oldPeer = ¤t.IBPPeer{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance.Name, + }, + } + + newPeer = ¤t.IBPPeer{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance.Name, + }, + } + + e = event.UpdateEvent{ + ObjectOld: oldPeer, + ObjectNew: newPeer, + } + }) + + It("returns false if only admin certs updated in new msp", func() { + oldPeer.Spec.Secret = ¤t.SecretSpec{ + MSP: ¤t.MSPSpec{ + Component: ¤t.MSP{ + AdminCerts: []string{"oldcert"}, + }, + }, + } + newPeer.Spec.Secret = ¤t.SecretSpec{ + MSP: ¤t.MSPSpec{ + Component: ¤t.MSP{ + AdminCerts: []string{"newcert"}, + }, + }, + } + reconciler.UpdateFunc(e) + Expect(reconciler.GetUpdateStatusAtElement(instance, 4).MSPUpdated()).To(Equal(false)) + }) + }) + + Context("update node OU", func() { + BeforeEach(func() { + oldPeer = ¤t.IBPPeer{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance.Name, + }, + } + + newPeer = ¤t.IBPPeer{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance.Name, + }, + } + newPeer.Spec.DisableNodeOU = ¤t.BoolTrue + + e = event.UpdateEvent{ + ObjectOld: oldPeer, + ObjectNew: newPeer, + } + }) + + It("returns true if node ou updated in spec", func() { + 
reconciler.UpdateFunc(e) + Expect(reconciler.GetUpdateStatusAtElement(instance, 4).NodeOUUpdated()).To(Equal(true)) + }) + }) + }) + + Context("set status", func() { + It("sets the status to error if error occured during IPBPPeer reconciliation", func() { + reconciler.SetStatus(instance, nil, errors.New("ibppeer error")) + Expect(instance.Status.Type).To(Equal(current.Error)) + Expect(instance.Status.Message).To(Equal("ibppeer error")) + }) + + It("sets the status to deploying if pod is not yet running", func() { + mockKubeClient.ListStub = func(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) error { + podList := obj.(*corev1.PodList) + pod := corev1.Pod{} + podList.Items = append(podList.Items, pod) + return nil + } + reconciler.SetStatus(instance, nil, nil) + Expect(instance.Status.Type).To(Equal(current.Deploying)) + }) + + It("sets the status to deployed if pod is running", func() { + mockKubeClient.ListStub = func(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) error { + podList := obj.(*corev1.PodList) + pod := corev1.Pod{ + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + }, + } + podList.Items = append(podList.Items, pod) + return nil + } + + reconciler.SetStatus(instance, nil, nil) + Expect(instance.Status.Type).To(Equal(current.Deployed)) + }) + }) + + Context("create func predicate", func() { + Context("case: peer", func() { + var ( + peer *current.IBPPeer + e event.CreateEvent + ) + + BeforeEach(func() { + peer = ¤t.IBPPeer{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance.GetName(), + }, + Status: current.IBPPeerStatus{ + CRStatus: current.CRStatus{ + Type: current.Deployed, + }, + }, + } + e = event.CreateEvent{ + Object: peer, + } + }) + + It("sets update flags to false if instance has status type and a create event is detected but no spec changes are detected", func() { + create := reconciler.CreateFunc(e) + Expect(create).To(Equal(true)) + + Expect(reconciler.GetUpdateStatus(instance)).To(Equal(&Update{ + specUpdated: false, + overridesUpdated: false, + dindArgsUpdated: false, + })) + }) + + It("sets update flags to true if instance has status type and a create event is detected and spec changes detected", func() { + override := []byte("{}") + + spec := current.IBPPeerSpec{ + ImagePullSecrets: []string{"pullsecret1"}, + ConfigOverride: &runtime.RawExtension{Raw: override}, + } + binaryData, err := yaml.Marshal(spec) + Expect(err).NotTo(HaveOccurred()) + + mockKubeClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj client.Object) error { + switch obj.(type) { + case *corev1.ConfigMap: + o := obj.(*corev1.ConfigMap) + o.BinaryData = map[string][]byte{ + "spec": binaryData, + } + } + return nil + } + create := reconciler.CreateFunc(e) + Expect(create).To(Equal(true)) + + Expect(reconciler.GetUpdateStatus(instance)).To(Equal(&Update{ + specUpdated: true, + overridesUpdated: true, + })) + }) + + It("does not trigger update if instance does not have status type and a create event is detected", func() { + peer.Status.Type = "" + + create := reconciler.CreateFunc(e) + Expect(create).To(Equal(true)) + + Expect(reconciler.GetUpdateStatus(instance)).To(Equal(&Update{})) + }) + + It("returns true but doesn't trigger update if new instance's name is unique to one IBPPeer in the list of IBPPeers", func() { + peer.Status.Type = "" + peer.Name = "test-peer1" + + create := reconciler.CreateFunc(e) + Expect(create).To(Equal(true)) + Expect(reconciler.GetUpdateStatus(instance)).To(Equal(&Update{})) + + }) + + 
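+ // Note on the duplicate-name case below (a reading of the test setup, not
+ // new behavior): the ListStub above returns "test-peer2" twice, so
+ // commoncontroller.ValidateCRName fails for that name and CreateFunc sets
+ // the resource to the Error status via SetStatus with
+ // InvalidCustomResourceCreateRequest, whereas "test-peer1" is unique and
+ // passes validation.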
It("returns false if new instance's name already exists for another IBPPeer custom resource", func() { + peer.Status.Type = "" + peer.Name = "test-peer2" + + create := reconciler.CreateFunc(e) + Expect(create).To(Equal(false)) + Expect(peer.Status.Type).To(Equal(current.Error)) + }) + }) + + Context("case: secret", func() { + var ( + cert *corev1.Secret + e event.CreateEvent + ) + + BeforeEach(func() { + cert = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + {Name: instance.Name, + Kind: "IBPPeer"}, + }, + }, + } + e = event.CreateEvent{} + }) + + It("sets create flags to true if create event is detected for secret and secret is a TLS signcert", func() { + cert.Name = fmt.Sprintf("tls-%s-signcert", instance.Name) + e.Object = cert + create := reconciler.CreateFunc(e) + Expect(create).To(Equal(true)) + + Expect(reconciler.GetUpdateStatus(instance)).To(Equal(&Update{ + tlsCertCreated: true, + })) + }) + + It("sets update flags to true if create event is detected for secret and secret is an ecert signcert", func() { + cert.Name = fmt.Sprintf("ecert-%s-signcert", instance.Name) + e.Object = cert + create := reconciler.CreateFunc(e) + Expect(create).To(Equal(true)) + + Expect(reconciler.GetUpdateStatus(instance)).To(Equal(&Update{ + ecertCreated: true, + })) + }) + + It("does not set update flags and doesn't trigger create event if create event is detected for secret and secret is not a signcert", func() { + cert.Name = fmt.Sprintf("tls-%s-admincert", instance.Name) + e.Object = cert + create := reconciler.CreateFunc(e) + Expect(create).To(Equal(false)) + + Expect(reconciler.GetUpdateStatus(instance)).To(Equal(&Update{})) + }) + + It("does not set update flags and doesn't trigger create event if create event is detected for non-peer secret", func() { + cert.Name = "tls-orderer1-signcert" + cert.OwnerReferences = nil + e.Object = cert + create := reconciler.CreateFunc(e) + Expect(create).To(Equal(false)) + + Expect(reconciler.GetUpdateStatus(instance)).To(Equal(&Update{})) + }) + + It("does not set update flags if create event is detected for secret with non-peer owner", func() { + cert.Name = "tls-orderer1-signcert" + cert.OwnerReferences[0].Kind = "IBPOrderer" + e.Object = cert + create := reconciler.CreateFunc(e) + Expect(create).To(Equal(true)) + + Expect(reconciler.GetUpdateStatus(instance)).To(Equal(&Update{ + tlsCertCreated: false, + })) + }) + }) + }) + + Context("remove element", func() { + BeforeEach(func() { + reconciler.PushUpdate(instance.Name, Update{ + overridesUpdated: true, + }) + + reconciler.PushUpdate(instance.Name, Update{ + specUpdated: true, + }) + + Expect(reconciler.GetUpdateStatus(instance).ConfigOverridesUpdated()).To(Equal(true)) + Expect(reconciler.GetUpdateStatusAtElement(instance, 1).SpecUpdated()).To(Equal(true)) + }) + + It("removes top element", func() { + reconciler.PopUpdate(instance.Name) + Expect(reconciler.GetUpdateStatus(instance).ConfigOverridesUpdated()).To(Equal(false)) + Expect(reconciler.GetUpdateStatus(instance).SpecUpdated()).To(Equal(true)) + }) + + It("removing more elements than in slice should not panic", func() { + reconciler.PopUpdate(instance.Name) + reconciler.PopUpdate(instance.Name) + reconciler.PopUpdate(instance.Name) + Expect(reconciler.GetUpdateStatus(instance).SpecUpdated()).To(Equal(false)) + Expect(reconciler.GetUpdateStatus(instance).ConfigOverridesUpdated()).To(Equal(false)) + }) + }) + + Context("append update if missing", func() { + It("appends update", func() { + updates := 
[]Update{{tlsCertUpdated: true}} + updates = reconciler.AppendUpdateIfMissing(updates, Update{ecertUpdated: true}) + Expect(len(updates)).To(Equal(2)) + }) + + It("doesn't append update that is already in stack", func() { + updates := []Update{{tlsCertUpdated: true}} + updates = reconciler.AppendUpdateIfMissing(updates, Update{tlsCertUpdated: true}) + Expect(len(updates)).To(Equal(1)) + }) + }) + + Context("push update", func() { + It("pushes update only if missing from stack of updates", func() { + reconciler.PushUpdate(instance.Name, Update{specUpdated: true}) + Expect(len(reconciler.update[instance.Name])).To(Equal(1)) + reconciler.PushUpdate(instance.Name, Update{tlsCertUpdated: true}) + Expect(len(reconciler.update[instance.Name])).To(Equal(2)) + reconciler.PushUpdate(instance.Name, Update{ecertUpdated: true}) + Expect(len(reconciler.update[instance.Name])).To(Equal(3)) + reconciler.PushUpdate(instance.Name, Update{tlsCertUpdated: true}) + Expect(len(reconciler.update[instance.Name])).To(Equal(3)) + reconciler.PushUpdate(instance.Name, Update{tlsCertUpdated: true, specUpdated: true}) + Expect(len(reconciler.update[instance.Name])).To(Equal(4)) + }) + }) + + Context("add owner reference to secret", func() { + var ( + secret *corev1.Secret + ) + + BeforeEach(func() { + secret = &corev1.Secret{} + secret.Name = "ecert-test-peer1-signcert" + }) + + It("returns error if fails to get list of peers", func() { + mockKubeClient.ListReturns(errors.New("list error")) + _, err := reconciler.AddOwnerReferenceToSecret(secret) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("list error")) + }) + + It("returns false if secret doesn't belong to any peers in list", func() { + secret.Name = "tls-orderer1-signcert" + added, err := reconciler.AddOwnerReferenceToSecret(secret) + Expect(err).NotTo(HaveOccurred()) + Expect(added).To(Equal(false)) + }) + + It("returns true if owner references added to secret", func() { + added, err := reconciler.AddOwnerReferenceToSecret(secret) + Expect(err).NotTo(HaveOccurred()) + Expect(added).To(Equal(true)) + }) + + It("returns true if owner references added to init-rootcert secret", func() { + secret.Name = "test-peer1-init-rootcert" + added, err := reconciler.AddOwnerReferenceToSecret(secret) + Expect(err).NotTo(HaveOccurred()) + Expect(added).To(Equal(true)) + }) + }) +}) diff --git a/controllers/ibppeer/ibppeer_suite_test.go b/controllers/ibppeer/ibppeer_suite_test.go new file mode 100644 index 00000000..ddb226dd --- /dev/null +++ b/controllers/ibppeer/ibppeer_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package ibppeer_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +func TestIbppeer(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Ibppeer Suite") +} diff --git a/controllers/ibppeer/mocks/peerreconcile.go b/controllers/ibppeer/mocks/peerreconcile.go new file mode 100644 index 00000000..98533fb6 --- /dev/null +++ b/controllers/ibppeer/mocks/peerreconcile.go @@ -0,0 +1,118 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package mocks + +import ( + "sync" + + "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + basepeer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/peer" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common" +) + +type PeerReconcile struct { + ReconcileStub func(*v1beta1.IBPPeer, basepeer.Update) (common.Result, error) + reconcileMutex sync.RWMutex + reconcileArgsForCall []struct { + arg1 *v1beta1.IBPPeer + arg2 basepeer.Update + } + reconcileReturns struct { + result1 common.Result + result2 error + } + reconcileReturnsOnCall map[int]struct { + result1 common.Result + result2 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *PeerReconcile) Reconcile(arg1 *v1beta1.IBPPeer, arg2 basepeer.Update) (common.Result, error) { + fake.reconcileMutex.Lock() + ret, specificReturn := fake.reconcileReturnsOnCall[len(fake.reconcileArgsForCall)] + fake.reconcileArgsForCall = append(fake.reconcileArgsForCall, struct { + arg1 *v1beta1.IBPPeer + arg2 basepeer.Update + }{arg1, arg2}) + stub := fake.ReconcileStub + fakeReturns := fake.reconcileReturns + fake.recordInvocation("Reconcile", []interface{}{arg1, arg2}) + fake.reconcileMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *PeerReconcile) ReconcileCallCount() int { + fake.reconcileMutex.RLock() + defer fake.reconcileMutex.RUnlock() + return len(fake.reconcileArgsForCall) +} + +func (fake *PeerReconcile) ReconcileCalls(stub func(*v1beta1.IBPPeer, basepeer.Update) (common.Result, error)) { + fake.reconcileMutex.Lock() + defer fake.reconcileMutex.Unlock() + fake.ReconcileStub = stub +} + +func (fake *PeerReconcile) ReconcileArgsForCall(i int) (*v1beta1.IBPPeer, basepeer.Update) { + fake.reconcileMutex.RLock() + defer fake.reconcileMutex.RUnlock() + argsForCall := fake.reconcileArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *PeerReconcile) ReconcileReturns(result1 common.Result, result2 error) { + fake.reconcileMutex.Lock() + defer fake.reconcileMutex.Unlock() + fake.ReconcileStub = nil + fake.reconcileReturns = struct { + result1 common.Result + result2 error + }{result1, result2} +} + +func (fake *PeerReconcile) ReconcileReturnsOnCall(i int, result1 common.Result, result2 error) { + fake.reconcileMutex.Lock() + defer fake.reconcileMutex.Unlock() + fake.ReconcileStub = nil + if fake.reconcileReturnsOnCall == nil { + fake.reconcileReturnsOnCall = make(map[int]struct { + result1 common.Result + result2 error + }) + } + fake.reconcileReturnsOnCall[i] = struct { + result1 common.Result + result2 error + }{result1, result2} +} + +func (fake *PeerReconcile) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.reconcileMutex.RLock() + defer fake.reconcileMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake 
*PeerReconcile) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} diff --git a/controllers/ibppeer/predicate.go b/controllers/ibppeer/predicate.go new file mode 100644 index 00000000..2db6f869 --- /dev/null +++ b/controllers/ibppeer/predicate.go @@ -0,0 +1,293 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package ibppeer + +import ( + "reflect" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + commoninit "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common" +) + +type Update struct { + specUpdated bool + overridesUpdated bool + dindArgsUpdated bool + tlsCertUpdated bool + ecertUpdated bool + peerTagUpdated bool + restartNeeded bool + ecertReenrollNeeded bool + tlsReenrollNeeded bool + ecertNewKeyReenroll bool + tlscertNewKeyReenroll bool + migrateToV2 bool + migrateToV24 bool + mspUpdated bool + ecertEnroll bool + tlscertEnroll bool + upgradedbs bool + tlsCertCreated bool + ecertCreated bool + nodeOUUpdated bool + imagesUpdated bool + fabricVersionUpdated bool + // update GetUpdateStackWithTrues when new fields are added +} + +func (u *Update) SpecUpdated() bool { + return u.specUpdated +} + +func (u *Update) ConfigOverridesUpdated() bool { + return u.overridesUpdated +} + +func (u *Update) DindArgsUpdated() bool { + return u.dindArgsUpdated +} + +func (u *Update) TLSCertUpdated() bool { + return u.tlsCertUpdated +} + +func (u *Update) EcertUpdated() bool { + return u.ecertUpdated +} + +func (u *Update) PeerTagUpdated() bool { + return u.peerTagUpdated +} + +func (u *Update) CertificateUpdated() bool { + return u.tlsCertUpdated || u.ecertUpdated +} + +func (u *Update) GetUpdatedCertType() commoninit.SecretType { + if u.tlsCertUpdated { + return commoninit.TLS + } else if u.ecertUpdated { + return commoninit.ECERT + } + return "" +} + +func (u *Update) RestartNeeded() bool { + return u.restartNeeded +} + +func (u *Update) EcertReenrollNeeded() bool { + return u.ecertReenrollNeeded +} + +func (u *Update) TLSReenrollNeeded() bool { + return u.tlsReenrollNeeded +} + +func (u *Update) EcertNewKeyReenroll() bool { + return u.ecertNewKeyReenroll +} + +func (u *Update) TLScertNewKeyReenroll() bool { + return u.tlscertNewKeyReenroll +} + +func (u *Update) MigrateToV2() bool { + return u.migrateToV2 +} + +func (u *Update) MigrateToV24() bool { + return u.migrateToV24 +} + +func (u *Update) UpgradeDBs() bool { + return u.upgradedbs +} + +func (u *Update) EcertEnroll() bool { + return u.ecertEnroll +} + +func (u *Update) TLSCertEnroll() bool { + return u.tlscertEnroll +} + +func (u *Update) SetDindArgsUpdated(updated bool) { + 
u.dindArgsUpdated = updated +} + +func (u *Update) MSPUpdated() bool { + return u.mspUpdated +} + +func (u *Update) TLSCertCreated() bool { + return u.tlsCertCreated +} + +func (u *Update) EcertCreated() bool { + return u.ecertCreated +} + +func (u *Update) CertificateCreated() bool { + return u.tlsCertCreated || u.ecertCreated +} + +func (u *Update) GetCreatedCertType() commoninit.SecretType { + if u.tlsCertCreated { + return commoninit.TLS + } else if u.ecertCreated { + return commoninit.ECERT + } + return "" +} + +func (u *Update) CryptoBackupNeeded() bool { + return u.ecertEnroll || + u.tlscertEnroll || + u.ecertReenrollNeeded || + u.tlsReenrollNeeded || + u.ecertNewKeyReenroll || + u.tlscertNewKeyReenroll || + u.mspUpdated +} + +func (u *Update) NodeOUUpdated() bool { + return u.nodeOUUpdated +} + +// ImagesUpdated returns true if images updated +func (u *Update) ImagesUpdated() bool { + return u.imagesUpdated +} + +// FabricVersionUpdated returns true if fabric version updated +func (u *Update) FabricVersionUpdated() bool { + return u.fabricVersionUpdated +} + +func (u *Update) Needed() bool { + return u.specUpdated || + u.overridesUpdated || + u.dindArgsUpdated || + u.tlsCertUpdated || + u.ecertUpdated || + u.peerTagUpdated || + u.restartNeeded || + u.ecertReenrollNeeded || + u.tlsReenrollNeeded || + u.ecertNewKeyReenroll || + u.tlscertNewKeyReenroll || + u.migrateToV2 || + u.migrateToV24 || + u.mspUpdated || + u.ecertEnroll || + u.upgradedbs || + u.nodeOUUpdated || + u.imagesUpdated || + u.fabricVersionUpdated +} + +func (u *Update) GetUpdateStackWithTrues() string { + stack := "" + + if u.specUpdated { + stack += "specUpdated " + } + if u.overridesUpdated { + stack += "overridesUpdated " + } + if u.dindArgsUpdated { + stack += "dindArgsUpdated " + } + if u.tlsCertUpdated { + stack += "tlsCertUpdated " + } + if u.ecertUpdated { + stack += "ecertUpdated " + } + if u.peerTagUpdated { + stack += "peerTagUpdated " + } + if u.restartNeeded { + stack += "restartNeeded " + } + if u.ecertReenrollNeeded { + stack += "ecertReenrollNeeded" + } + if u.tlsReenrollNeeded { + stack += "tlsReenrollNeeded" + } + if u.migrateToV2 { + stack += "migrateToV2 " + } + if u.migrateToV24 { + stack += "migrateToV24 " + } + if u.mspUpdated { + stack += "mspUpdated " + } + if u.ecertEnroll { + stack += "ecertEnroll " + } + if u.tlscertEnroll { + stack += "tlscertEnroll " + } + if u.upgradedbs { + stack += "upgradedbs " + } + if u.tlsCertCreated { + stack += "tlsCertCreated " + } + if u.ecertCreated { + stack += "ecertCreated " + } + if u.nodeOUUpdated { + stack += "nodeOUUpdated " + } + if u.imagesUpdated { + stack += "imagesUpdated " + } + if u.fabricVersionUpdated { + stack += "fabricVersionUpdated " + } + + if len(stack) == 0 { + stack = "emptystack " + } + + return stack +} + +func imagesUpdated(old, new *current.IBPPeer) bool { + if new.Spec.Images != nil { + if old.Spec.Images == nil { + return true + } + + if old.Spec.Images != nil { + return !reflect.DeepEqual(old.Spec.Images, new.Spec.Images) + } + } + + return false +} + +func fabricVersionUpdated(old, new *current.IBPPeer) bool { + return old.Spec.FabricVersion != new.Spec.FabricVersion +} diff --git a/controllers/mocks/client.go b/controllers/mocks/client.go new file mode 100644 index 00000000..ee14505d --- /dev/null +++ b/controllers/mocks/client.go @@ -0,0 +1,746 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package mocks + +import ( + "context" + "sync" + + "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type Client struct { + CreateStub func(context.Context, client.Object, ...controllerclient.CreateOption) error + createMutex sync.RWMutex + createArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 []controllerclient.CreateOption + } + createReturns struct { + result1 error + } + createReturnsOnCall map[int]struct { + result1 error + } + CreateOrUpdateStub func(context.Context, client.Object, ...controllerclient.CreateOrUpdateOption) error + createOrUpdateMutex sync.RWMutex + createOrUpdateArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 []controllerclient.CreateOrUpdateOption + } + createOrUpdateReturns struct { + result1 error + } + createOrUpdateReturnsOnCall map[int]struct { + result1 error + } + DeleteStub func(context.Context, client.Object, ...client.DeleteOption) error + deleteMutex sync.RWMutex + deleteArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 []client.DeleteOption + } + deleteReturns struct { + result1 error + } + deleteReturnsOnCall map[int]struct { + result1 error + } + GetStub func(context.Context, types.NamespacedName, client.Object) error + getMutex sync.RWMutex + getArgsForCall []struct { + arg1 context.Context + arg2 types.NamespacedName + arg3 client.Object + } + getReturns struct { + result1 error + } + getReturnsOnCall map[int]struct { + result1 error + } + ListStub func(context.Context, client.ObjectList, ...client.ListOption) error + listMutex sync.RWMutex + listArgsForCall []struct { + arg1 context.Context + arg2 client.ObjectList + arg3 []client.ListOption + } + listReturns struct { + result1 error + } + listReturnsOnCall map[int]struct { + result1 error + } + PatchStub func(context.Context, client.Object, client.Patch, ...controllerclient.PatchOption) error + patchMutex sync.RWMutex + patchArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 client.Patch + arg4 []controllerclient.PatchOption + } + patchReturns struct { + result1 error + } + patchReturnsOnCall map[int]struct { + result1 error + } + PatchStatusStub func(context.Context, client.Object, client.Patch, ...controllerclient.PatchOption) error + patchStatusMutex sync.RWMutex + patchStatusArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 client.Patch + arg4 []controllerclient.PatchOption + } + patchStatusReturns struct { + result1 error + } + patchStatusReturnsOnCall map[int]struct { + result1 error + } + UpdateStub func(context.Context, client.Object, ...controllerclient.UpdateOption) error + updateMutex sync.RWMutex + updateArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 []controllerclient.UpdateOption + } + updateReturns struct { + result1 error + } + updateReturnsOnCall map[int]struct { + result1 error + } + UpdateStatusStub func(context.Context, client.Object, ...client.UpdateOption) error + updateStatusMutex sync.RWMutex + updateStatusArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 []client.UpdateOption + } + updateStatusReturns struct { + result1 error + } + updateStatusReturnsOnCall map[int]struct { + result1 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *Client) Create(arg1 context.Context, arg2 client.Object, arg3 ...controllerclient.CreateOption) error { + 
fake.createMutex.Lock() + ret, specificReturn := fake.createReturnsOnCall[len(fake.createArgsForCall)] + fake.createArgsForCall = append(fake.createArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 []controllerclient.CreateOption + }{arg1, arg2, arg3}) + stub := fake.CreateStub + fakeReturns := fake.createReturns + fake.recordInvocation("Create", []interface{}{arg1, arg2, arg3}) + fake.createMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) CreateCallCount() int { + fake.createMutex.RLock() + defer fake.createMutex.RUnlock() + return len(fake.createArgsForCall) +} + +func (fake *Client) CreateCalls(stub func(context.Context, client.Object, ...controllerclient.CreateOption) error) { + fake.createMutex.Lock() + defer fake.createMutex.Unlock() + fake.CreateStub = stub +} + +func (fake *Client) CreateArgsForCall(i int) (context.Context, client.Object, []controllerclient.CreateOption) { + fake.createMutex.RLock() + defer fake.createMutex.RUnlock() + argsForCall := fake.createArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) CreateReturns(result1 error) { + fake.createMutex.Lock() + defer fake.createMutex.Unlock() + fake.CreateStub = nil + fake.createReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) CreateReturnsOnCall(i int, result1 error) { + fake.createMutex.Lock() + defer fake.createMutex.Unlock() + fake.CreateStub = nil + if fake.createReturnsOnCall == nil { + fake.createReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.createReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) CreateOrUpdate(arg1 context.Context, arg2 client.Object, arg3 ...controllerclient.CreateOrUpdateOption) error { + fake.createOrUpdateMutex.Lock() + ret, specificReturn := fake.createOrUpdateReturnsOnCall[len(fake.createOrUpdateArgsForCall)] + fake.createOrUpdateArgsForCall = append(fake.createOrUpdateArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 []controllerclient.CreateOrUpdateOption + }{arg1, arg2, arg3}) + stub := fake.CreateOrUpdateStub + fakeReturns := fake.createOrUpdateReturns + fake.recordInvocation("CreateOrUpdate", []interface{}{arg1, arg2, arg3}) + fake.createOrUpdateMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) CreateOrUpdateCallCount() int { + fake.createOrUpdateMutex.RLock() + defer fake.createOrUpdateMutex.RUnlock() + return len(fake.createOrUpdateArgsForCall) +} + +func (fake *Client) CreateOrUpdateCalls(stub func(context.Context, client.Object, ...controllerclient.CreateOrUpdateOption) error) { + fake.createOrUpdateMutex.Lock() + defer fake.createOrUpdateMutex.Unlock() + fake.CreateOrUpdateStub = stub +} + +func (fake *Client) CreateOrUpdateArgsForCall(i int) (context.Context, client.Object, []controllerclient.CreateOrUpdateOption) { + fake.createOrUpdateMutex.RLock() + defer fake.createOrUpdateMutex.RUnlock() + argsForCall := fake.createOrUpdateArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) CreateOrUpdateReturns(result1 error) { + fake.createOrUpdateMutex.Lock() + defer fake.createOrUpdateMutex.Unlock() + fake.CreateOrUpdateStub = nil + fake.createOrUpdateReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) CreateOrUpdateReturnsOnCall(i int, result1 error) { + fake.createOrUpdateMutex.Lock() + defer fake.createOrUpdateMutex.Unlock() + fake.CreateOrUpdateStub = nil + if fake.createOrUpdateReturnsOnCall == nil { + fake.createOrUpdateReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.createOrUpdateReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) Delete(arg1 context.Context, arg2 client.Object, arg3 ...client.DeleteOption) error { + fake.deleteMutex.Lock() + ret, specificReturn := fake.deleteReturnsOnCall[len(fake.deleteArgsForCall)] + fake.deleteArgsForCall = append(fake.deleteArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 []client.DeleteOption + }{arg1, arg2, arg3}) + stub := fake.DeleteStub + fakeReturns := fake.deleteReturns + fake.recordInvocation("Delete", []interface{}{arg1, arg2, arg3}) + fake.deleteMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) DeleteCallCount() int { + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + return len(fake.deleteArgsForCall) +} + +func (fake *Client) DeleteCalls(stub func(context.Context, client.Object, ...client.DeleteOption) error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = stub +} + +func (fake *Client) DeleteArgsForCall(i int) (context.Context, client.Object, []client.DeleteOption) { + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + argsForCall := fake.deleteArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) DeleteReturns(result1 error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = nil + fake.deleteReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) DeleteReturnsOnCall(i int, result1 error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = nil + if fake.deleteReturnsOnCall == nil { + fake.deleteReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.deleteReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) Get(arg1 context.Context, arg2 types.NamespacedName, arg3 client.Object) error { + fake.getMutex.Lock() + ret, specificReturn := fake.getReturnsOnCall[len(fake.getArgsForCall)] + fake.getArgsForCall = append(fake.getArgsForCall, struct { + arg1 context.Context + arg2 types.NamespacedName + arg3 client.Object + }{arg1, arg2, arg3}) + stub := fake.GetStub + fakeReturns := fake.getReturns + fake.recordInvocation("Get", []interface{}{arg1, arg2, arg3}) + fake.getMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) GetCallCount() int { + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + return len(fake.getArgsForCall) +} + +func (fake *Client) GetCalls(stub func(context.Context, types.NamespacedName, client.Object) error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = stub +} + +func (fake *Client) GetArgsForCall(i int) (context.Context, types.NamespacedName, client.Object) { + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + argsForCall := fake.getArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) GetReturns(result1 error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = nil + fake.getReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) GetReturnsOnCall(i int, result1 error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = nil + if fake.getReturnsOnCall == nil { + fake.getReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.getReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) List(arg1 context.Context, arg2 client.ObjectList, arg3 ...client.ListOption) error { + fake.listMutex.Lock() + ret, specificReturn := fake.listReturnsOnCall[len(fake.listArgsForCall)] + fake.listArgsForCall = append(fake.listArgsForCall, struct { + arg1 context.Context + arg2 client.ObjectList + arg3 []client.ListOption + }{arg1, arg2, arg3}) + stub := fake.ListStub + fakeReturns := fake.listReturns + fake.recordInvocation("List", []interface{}{arg1, arg2, arg3}) + fake.listMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) 
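// NOTE (illustrative sketch): Get is typically stubbed with a function so the fake
// can fill in the object the caller passes by reference. For example, assuming the
// ibpv1beta1 API package used elsewhere in this patch:
//
//	fakeClient.GetCalls(func(_ context.Context, nn types.NamespacedName, obj client.Object) error {
//		if ca, ok := obj.(*ibpv1beta1.IBPCA); ok {
//			ca.Name = nn.Name // hand back a minimally populated custom resource
//		}
//		return nil
//	})
//
//	Expect(fakeClient.GetCallCount()).To(BeNumerically(">", 0))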
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) ListCallCount() int { + fake.listMutex.RLock() + defer fake.listMutex.RUnlock() + return len(fake.listArgsForCall) +} + +func (fake *Client) ListCalls(stub func(context.Context, client.ObjectList, ...client.ListOption) error) { + fake.listMutex.Lock() + defer fake.listMutex.Unlock() + fake.ListStub = stub +} + +func (fake *Client) ListArgsForCall(i int) (context.Context, client.ObjectList, []client.ListOption) { + fake.listMutex.RLock() + defer fake.listMutex.RUnlock() + argsForCall := fake.listArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) ListReturns(result1 error) { + fake.listMutex.Lock() + defer fake.listMutex.Unlock() + fake.ListStub = nil + fake.listReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) ListReturnsOnCall(i int, result1 error) { + fake.listMutex.Lock() + defer fake.listMutex.Unlock() + fake.ListStub = nil + if fake.listReturnsOnCall == nil { + fake.listReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.listReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) Patch(arg1 context.Context, arg2 client.Object, arg3 client.Patch, arg4 ...controllerclient.PatchOption) error { + fake.patchMutex.Lock() + ret, specificReturn := fake.patchReturnsOnCall[len(fake.patchArgsForCall)] + fake.patchArgsForCall = append(fake.patchArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 client.Patch + arg4 []controllerclient.PatchOption + }{arg1, arg2, arg3, arg4}) + stub := fake.PatchStub + fakeReturns := fake.patchReturns + fake.recordInvocation("Patch", []interface{}{arg1, arg2, arg3, arg4}) + fake.patchMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3, arg4...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) PatchCallCount() int { + fake.patchMutex.RLock() + defer fake.patchMutex.RUnlock() + return len(fake.patchArgsForCall) +} + +func (fake *Client) PatchCalls(stub func(context.Context, client.Object, client.Patch, ...controllerclient.PatchOption) error) { + fake.patchMutex.Lock() + defer fake.patchMutex.Unlock() + fake.PatchStub = stub +} + +func (fake *Client) PatchArgsForCall(i int) (context.Context, client.Object, client.Patch, []controllerclient.PatchOption) { + fake.patchMutex.RLock() + defer fake.patchMutex.RUnlock() + argsForCall := fake.patchArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4 +} + +func (fake *Client) PatchReturns(result1 error) { + fake.patchMutex.Lock() + defer fake.patchMutex.Unlock() + fake.PatchStub = nil + fake.patchReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) PatchReturnsOnCall(i int, result1 error) { + fake.patchMutex.Lock() + defer fake.patchMutex.Unlock() + fake.PatchStub = nil + if fake.patchReturnsOnCall == nil { + fake.patchReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.patchReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) PatchStatus(arg1 context.Context, arg2 client.Object, arg3 client.Patch, arg4 ...controllerclient.PatchOption) error { + fake.patchStatusMutex.Lock() + ret, specificReturn := fake.patchStatusReturnsOnCall[len(fake.patchStatusArgsForCall)] + fake.patchStatusArgsForCall = append(fake.patchStatusArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 client.Patch + arg4 []controllerclient.PatchOption + }{arg1, arg2, arg3, arg4}) + stub := fake.PatchStatusStub + fakeReturns := fake.patchStatusReturns + fake.recordInvocation("PatchStatus", []interface{}{arg1, arg2, arg3, arg4}) + fake.patchStatusMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3, arg4...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) PatchStatusCallCount() int { + fake.patchStatusMutex.RLock() + defer fake.patchStatusMutex.RUnlock() + return len(fake.patchStatusArgsForCall) +} + +func (fake *Client) PatchStatusCalls(stub func(context.Context, client.Object, client.Patch, ...controllerclient.PatchOption) error) { + fake.patchStatusMutex.Lock() + defer fake.patchStatusMutex.Unlock() + fake.PatchStatusStub = stub +} + +func (fake *Client) PatchStatusArgsForCall(i int) (context.Context, client.Object, client.Patch, []controllerclient.PatchOption) { + fake.patchStatusMutex.RLock() + defer fake.patchStatusMutex.RUnlock() + argsForCall := fake.patchStatusArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4 +} + +func (fake *Client) PatchStatusReturns(result1 error) { + fake.patchStatusMutex.Lock() + defer fake.patchStatusMutex.Unlock() + fake.PatchStatusStub = nil + fake.patchStatusReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) PatchStatusReturnsOnCall(i int, result1 error) { + fake.patchStatusMutex.Lock() + defer fake.patchStatusMutex.Unlock() + fake.PatchStatusStub = nil + if fake.patchStatusReturnsOnCall == nil { + fake.patchStatusReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.patchStatusReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) Update(arg1 context.Context, arg2 client.Object, arg3 ...controllerclient.UpdateOption) error { + fake.updateMutex.Lock() + ret, specificReturn := fake.updateReturnsOnCall[len(fake.updateArgsForCall)] + fake.updateArgsForCall = append(fake.updateArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 []controllerclient.UpdateOption + }{arg1, arg2, arg3}) + stub := fake.UpdateStub + fakeReturns := fake.updateReturns + fake.recordInvocation("Update", []interface{}{arg1, arg2, arg3}) + fake.updateMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) UpdateCallCount() int { + fake.updateMutex.RLock() + defer fake.updateMutex.RUnlock() + return len(fake.updateArgsForCall) +} + +func (fake *Client) UpdateCalls(stub func(context.Context, client.Object, ...controllerclient.UpdateOption) error) { + fake.updateMutex.Lock() + defer fake.updateMutex.Unlock() + fake.UpdateStub = stub +} + +func (fake *Client) UpdateArgsForCall(i int) (context.Context, client.Object, []controllerclient.UpdateOption) { + fake.updateMutex.RLock() + defer fake.updateMutex.RUnlock() + argsForCall := fake.updateArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) UpdateReturns(result1 error) { + fake.updateMutex.Lock() + defer fake.updateMutex.Unlock() + fake.UpdateStub = nil + fake.updateReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) UpdateReturnsOnCall(i int, result1 error) { + fake.updateMutex.Lock() + defer fake.updateMutex.Unlock() + fake.UpdateStub = nil + if fake.updateReturnsOnCall == nil { + fake.updateReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.updateReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) UpdateStatus(arg1 context.Context, arg2 client.Object, arg3 ...client.UpdateOption) error { + fake.updateStatusMutex.Lock() + ret, specificReturn := fake.updateStatusReturnsOnCall[len(fake.updateStatusArgsForCall)] + fake.updateStatusArgsForCall = append(fake.updateStatusArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 []client.UpdateOption + }{arg1, arg2, arg3}) + stub := fake.UpdateStatusStub + fakeReturns := fake.updateStatusReturns + fake.recordInvocation("UpdateStatus", []interface{}{arg1, arg2, arg3}) + fake.updateStatusMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) 
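// NOTE (illustrative sketch): besides the per-method counters, the fake keeps a
// global invocation log (see Invocations further below), which a test can use to
// check that a pass over the fake performed only the expected kinds of writes:
//
//	invocations := fakeClient.Invocations()
//	Expect(invocations["Update"]).To(HaveLen(1))
//	Expect(invocations["Delete"]).To(BeEmpty())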
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) UpdateStatusCallCount() int { + fake.updateStatusMutex.RLock() + defer fake.updateStatusMutex.RUnlock() + return len(fake.updateStatusArgsForCall) +} + +func (fake *Client) UpdateStatusCalls(stub func(context.Context, client.Object, ...client.UpdateOption) error) { + fake.updateStatusMutex.Lock() + defer fake.updateStatusMutex.Unlock() + fake.UpdateStatusStub = stub +} + +func (fake *Client) UpdateStatusArgsForCall(i int) (context.Context, client.Object, []client.UpdateOption) { + fake.updateStatusMutex.RLock() + defer fake.updateStatusMutex.RUnlock() + argsForCall := fake.updateStatusArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) UpdateStatusReturns(result1 error) { + fake.updateStatusMutex.Lock() + defer fake.updateStatusMutex.Unlock() + fake.UpdateStatusStub = nil + fake.updateStatusReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) UpdateStatusReturnsOnCall(i int, result1 error) { + fake.updateStatusMutex.Lock() + defer fake.updateStatusMutex.Unlock() + fake.UpdateStatusStub = nil + if fake.updateStatusReturnsOnCall == nil { + fake.updateStatusReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.updateStatusReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.createMutex.RLock() + defer fake.createMutex.RUnlock() + fake.createOrUpdateMutex.RLock() + defer fake.createOrUpdateMutex.RUnlock() + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + fake.listMutex.RLock() + defer fake.listMutex.RUnlock() + fake.patchMutex.RLock() + defer fake.patchMutex.RUnlock() + fake.patchStatusMutex.RLock() + defer fake.patchStatusMutex.RUnlock() + fake.updateMutex.RLock() + defer fake.updateMutex.RUnlock() + fake.updateStatusMutex.RLock() + defer fake.updateStatusMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *Client) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ controllerclient.Client = new(Client) diff --git a/controllers/suite_test.go b/controllers/suite_test.go new file mode 100644 index 00000000..1fb16213 --- /dev/null +++ b/controllers/suite_test.go @@ -0,0 +1,92 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package controllers + +import ( + "path/filepath" + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + ibpv1beta1 "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + // +kubebuilder:scaffold:imports +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. + +var cfg *rest.Config +var k8sClient client.Client +var testEnv *envtest.Environment + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecsWithDefaultAndCustomReporters(t, + "Controller Suite", + []Reporter{printer.NewlineReporter{}}) +} + +var _ = BeforeSuite(func(done Done) { + logf.SetLogger(zap.New()) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, + } + + var err error + cfg, err = testEnv.Start() + Expect(err).ToNot(HaveOccurred()) + Expect(cfg).ToNot(BeNil()) + + err = ibpv1beta1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + err = ibpv1beta1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + err = ibpv1beta1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + err = ibpv1beta1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + // +kubebuilder:scaffold:scheme + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).ToNot(HaveOccurred()) + Expect(k8sClient).ToNot(BeNil()) + + close(done) +}, 60) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).ToNot(HaveOccurred()) +}) diff --git a/defaultconfig/ca/ca.yaml b/defaultconfig/ca/ca.yaml new file mode 100644 index 00000000..7e3e1f94 --- /dev/null +++ b/defaultconfig/ca/ca.yaml @@ -0,0 +1,516 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +############################################################################# +# This is a configuration file for the fabric-ca-server command. +# +# COMMAND LINE ARGUMENTS AND ENVIRONMENT VARIABLES +# ------------------------------------------------ +# Each configuration element can be overridden via command line +# arguments or environment variables. The precedence for determining +# the value of each element is as follows: +# 1) command line argument +# Examples: +# a) --port 443 +# To set the listening port +# b) --ca.keyfile ../mykey.pem +# To set the "keyfile" element in the "ca" section below; +# note the '.' 
separator character. +# 2) environment variable +# Examples: +# a) FABRIC_CA_SERVER_PORT=443 +# To set the listening port +# b) FABRIC_CA_SERVER_CA_KEYFILE="../mykey.pem" +# To set the "keyfile" element in the "ca" section below; +# note the '_' separator character. +# 3) configuration file +# 4) default value (if there is one) +# All default values are shown beside each element below. +# +# FILE NAME ELEMENTS +# ------------------ +# The value of all fields whose name ends with "file" or "files" are +# name or names of other files. +# For example, see "tls.certfile" and "tls.clientauth.certfiles". +# The value of each of these fields can be a simple filename, a +# relative path, or an absolute path. If the value is not an +# absolute path, it is interpretted as being relative to the location +# of this configuration file. +# +############################################################################# + +# Server's listening port (default: 7054) +port: 7054 + +# Cross-Origin Resource Sharing (CORS) +cors: + enabled: false + origins: + - "*" + +# Enables debug logging (default: false) +debug: false + +# Size limit of an acceptable CRL in bytes (default: 512000) +crlsizelimit: 512000 + +############################################################################# +# TLS section for the server's listening port +# +# The following types are supported for client authentication: NoClientCert, +# RequestClientCert, RequireAnyClientCert, VerifyClientCertIfGiven, +# and RequireAndVerifyClientCert. +# +# Certfiles is a list of root certificate authorities that the server uses +# when verifying client certificates. +############################################################################# +tls: + # Enable TLS (default: false) + enabled: true + # TLS for the server's listening port + certfile: + keyfile: + clientauth: + type: noclientcert + certfiles: + +############################################################################# +# The CA section contains information related to the Certificate Authority +# including the name of the CA, which should be unique for all members +# of a blockchain network. It also includes the key and certificate files +# used when issuing enrollment certificates (ECerts) and transaction +# certificates (TCerts). +# The chainfile (if it exists) contains the certificate chain which +# should be trusted for this CA, where the 1st in the chain is always the +# root CA certificate. +############################################################################# +ca: + # Name of this CA + name: ca + # Key file (is only used to import a private key into BCCSP) + keyfile: + # Certificate file (default: ca-cert.pem) + certfile: + # Chain file + chainfile: + # Ignore Certificate Expiration in the case of re-enroll + reenrollIgnoreCertExpiry: true + +############################################################################# +# The gencrl REST endpoint is used to generate a CRL that contains revoked +# certificates. This section contains configuration options that are used +# during gencrl request processing. +############################################################################# +crl: + # Specifies expiration for the generated CRL. The number of hours + # specified by this property is added to the UTC time, the resulting time + # is used to set the 'Next Update' date of the CRL. 
+ expiry: 24h + +############################################################################# +# The registry section controls how the fabric-ca-server does two things: +# 1) authenticates enrollment requests which contain a username and password +# (also known as an enrollment ID and secret). +# 2) once authenticated, retrieves the identity's attribute names and +# values which the fabric-ca-server optionally puts into TCerts +# which it issues for transacting on the Hyperledger Fabric blockchain. +# These attributes are useful for making access control decisions in +# chaincode. +# There are two main configuration options: +# 1) The fabric-ca-server is the registry. +# This is true if "ldap.enabled" in the ldap section below is false. +# 2) An LDAP server is the registry, in which case the fabric-ca-server +# calls the LDAP server to perform these tasks. +# This is true if "ldap.enabled" in the ldap section below is true, +# which means this "registry" section is ignored. +############################################################################# +registry: + # Maximum number of times a password/secret can be reused for enrollment + # (default: -1, which means there is no limit) + maxenrollments: -1 + + # Contains identity information which is used when LDAP is disabled + identities: + - name: admin + pass: adminpw + type: client + affiliation: "" + attrs: + hf.Registrar.Roles: "*" + hf.Registrar.DelegateRoles: "*" + hf.Revoker: true + hf.IntermediateCA: true + hf.GenCRL: true + hf.Registrar.Attributes: "*" + hf.AffiliationMgr: true + +############################################################################# +# Database section +# Supported types are: "sqlite3", "postgres", and "mysql". +# The datasource value depends on the type. +# If the type is "sqlite3", the datasource value is a file name to use +# as the database store. Since "sqlite3" is an embedded database, it +# may not be used if you want to run the fabric-ca-server in a cluster. +# To run the fabric-ca-server in a cluster, you must choose "postgres" +# or "mysql". +############################################################################# +db: + type: sqlite3 + datasource: fabric-ca-server.db + tls: + enabled: false + certfiles: + client: + certfile: + keyfile: + +############################################################################# +# LDAP section +# If LDAP is enabled, the fabric-ca-server calls LDAP to: +# 1) authenticate enrollment ID and secret (i.e. username and password) +# for enrollment requests; +# 2) To retrieve identity attributes +############################################################################# +ldap: + # Enables or disables the LDAP client (default: false) + # If this is set to true, the "registry" section is ignored. + enabled: false + # The URL of the LDAP server + url: ldap://:@:/ + # TLS configuration for the client connection to the LDAP server + tls: + certfiles: + client: + certfile: + keyfile: + # Attribute related configuration for mapping from LDAP entries to Fabric CA attributes + attribute: + # 'names' is an array of strings containing the LDAP attribute names which are + # requested from the LDAP server for an LDAP identity's entry + names: ['uid','member'] + # The 'converters' section is used to convert an LDAP entry to the value of + # a fabric CA attribute. 
+ # For example, the following converts an LDAP 'uid' attribute + # whose value begins with 'revoker' to a fabric CA attribute + # named "hf.Revoker" with a value of "true" (because the boolean expression + # evaluates to true). + # converters: + # - name: hf.Revoker + # value: attr("uid") =~ "revoker*" + converters: + - name: + value: + # The 'maps' section contains named maps which may be referenced by the 'map' + # function in the 'converters' section to map LDAP responses to arbitrary values. + # For example, assume a user has an LDAP attribute named 'member' which has multiple + # values which are each a distinguished name (i.e. a DN). For simplicity, assume the + # values of the 'member' attribute are 'dn1', 'dn2', and 'dn3'. + # Further assume the following configuration. + # converters: + # - name: hf.Registrar.Roles + # value: map(attr("member"),"groups") + # maps: + # groups: + # - name: dn1 + # value: peer + # - name: dn2 + # value: client + # The value of the user's 'hf.Registrar.Roles' attribute is then computed to be + # "peer,client,dn3". This is because the value of 'attr("member")' is + # "dn1,dn2,dn3", and the call to 'map' with a 2nd argument of + # "group" replaces "dn1" with "peer" and "dn2" with "client". + maps: + groups: + - name: + value: + +############################################################################# +# Affiliations section. Fabric CA server can be bootstrapped with the +# affiliations specified in this section. Affiliations are specified as maps. +# For example: +# businessunit1: +# department1: +# - team1 +# businessunit2: +# - department2 +# - department3 +# +# Affiliations are hierarchical in nature. In the above example, +# department1 (used as businessunit1.department1) is the child of businessunit1. +# team1 (used as businessunit1.department1.team1) is the child of department1. +# department2 (used as businessunit2.department2) and department3 (businessunit2.department3) +# are children of businessunit2. +# Note: Affiliations are case sensitive except for the non-leaf affiliations +# (like businessunit1, department1, businessunit2) that are specified in the configuration file, +# which are always stored in lower case. +############################################################################# +affiliations: + +############################################################################# +# Signing section +# +# The "default" subsection is used to sign enrollment certificates; +# the default expiration ("expiry" field) is "8760h", which is 1 year in hours. +# +# The "ca" profile subsection is used to sign intermediate CA certificates; +# the default expiration ("expiry" field) is "43800h" which is 5 years in hours. +# Note that "isca" is true, meaning that it issues a CA certificate. +# A maxpathlen of 0 means that the intermediate CA cannot issue other +# intermediate CA certificates, though it can still issue end entity certificates. +# (See RFC 5280, section 4.2.1.9) +# +# The "tls" profile subsection is used to sign TLS certificate requests; +# the default expiration ("expiry" field) is "8760h", which is 1 year in hours. 
+############################################################################# +signing: + default: + usage: + - digital signature + expiry: 8760h + profiles: + ca: + usage: + - cert sign + - crl sign + expiry: 43800h + caconstraint: + isca: true + maxpathlen: 0 + tls: + usage: + - signing + - key encipherment + - server auth + - client auth + - key agreement + expiry: 87600h + +########################################################################### +# Certificate Signing Request (CSR) section. +# This controls the creation of the root CA certificate. +# The expiration for the root CA certificate is configured with the +# "ca.expiry" field below, whose default value is "131400h" which is +# 15 years in hours. +# The pathlength field is used to limit CA certificate hierarchy as described +# in section 4.2.1.9 of RFC 5280. +# Examples: +# 1) No pathlength value means no limit is requested. +# 2) pathlength == 1 means a limit of 1 is requested which is the default for +# a root CA. This means the root CA can issue intermediate CA certificates, +# but these intermediate CAs may not in turn issue other CA certificates +# though they can still issue end entity certificates. +# 3) pathlength == 0 means a limit of 0 is requested; +# this is the default for an intermediate CA, which means it can not issue +# CA certificates though it can still issue end entity certificates. +########################################################################### +csr: + cn: ca + keyrequest: + algo: ecdsa + size: 256 + names: + - C: US + ST: "North Carolina" + L: + O: Hyperledger + OU: Fabric + hosts: + - localhost + - 127.0.0.1 + ca: + expiry: 131400h + pathlength: 1 + +########################################################################### +# Each CA can issue both X509 enrollment certificate as well as Idemix +# Credential. This section specifies configuration for the issuer component +# that is responsible for issuing Idemix credentials. +########################################################################### +idemix: + # Specifies pool size for revocation handles. A revocation handle is an unique identifier of an + # Idemix credential. The issuer will create a pool revocation handles of this specified size. When + # a credential is requested, issuer will get handle from the pool and assign it to the credential. + # Issuer will repopulate the pool with new handles when the last handle in the pool is used. + # A revocation handle and credential revocation information (CRI) are used to create non revocation proof + # by the prover to prove to the verifier that her credential is not revoked. + rhpoolsize: 1000 + + # The Idemix credential issuance is a two step process. First step is to get a nonce from the issuer + # and second step is send credential request that is constructed using the nonce to the isuser to + # request a credential. This configuration property specifies expiration for the nonces. By default is + # nonces expire after 15 seconds. The value is expressed in the time.Duration format (see https://golang.org/pkg/time/#ParseDuration). + nonceexpiration: 15s + + # Specifies interval at which expired nonces are removed from datastore. Default value is 15 minutes. 
+ # The value is expressed in the time.Duration format (see https://golang.org/pkg/time/#ParseDuration) + noncesweepinterval: 15m + +############################################################################# +# BCCSP (BlockChain Crypto Service Provider) section is used to select which +# crypto library implementation to use +############################################################################# +bccsp: + default: SW + sw: + hash: SHA2 + security: 256 + filekeystore: + # The directory used for the software file-based keystore + keystore: msp/keystore + +############################################################################# +# Multi CA section +# +# Each Fabric CA server contains one CA by default. This section is used +# to configure multiple CAs in a single server. +# +# 1) --cacount +# Automatically generate non-default CAs. The names of these +# additional CAs are "ca1", "ca2", ... "caN", where "N" is +# This is particularly useful in a development environment to quickly set up +# multiple CAs. Note that, this config option is not applicable to intermediate CA server +# i.e., Fabric CA server that is started with intermediate.parentserver.url config +# option (-u command line option) +# +# 2) --cafiles +# For each CA config file in the list, generate a separate signing CA. Each CA +# config file in this list MAY contain all of the same elements as are found in +# the server config file except port, debug, and tls sections. +# +# Examples: +# fabric-ca-server start -b admin:adminpw --cacount 2 +# +# fabric-ca-server start -b admin:adminpw --cafiles ca/ca1/fabric-ca-server-config.yaml +# --cafiles ca/ca2/fabric-ca-server-config.yaml +# +############################################################################# + +cacount: + +cafiles: + +############################################################################# +# Intermediate CA section +# +# The relationship between servers and CAs is as follows: +# 1) A single server process may contain or function as one or more CAs. +# This is configured by the "Multi CA section" above. +# 2) Each CA is either a root CA or an intermediate CA. +# 3) Each intermediate CA has a parent CA which is either a root CA or another intermediate CA. +# +# This section pertains to configuration of #2 and #3. +# If the "intermediate.parentserver.url" property is set, +# then this is an intermediate CA with the specified parent +# CA. +# +# parentserver section +# url - The URL of the parent server +# caname - Name of the CA to enroll within the server +# +# enrollment section used to enroll intermediate CA with parent CA +# profile - Name of the signing profile to use in issuing the certificate +# label - Label to use in HSM operations +# +# tls section for secure socket connection +# certfiles - PEM-encoded list of trusted root certificate files +# client: +# certfile - PEM-encoded certificate file for when client authentication +# is enabled on server +# keyfile - PEM-encoded key file for when client authentication +# is enabled on server +############################################################################# +intermediate: + parentserver: + url: + caname: + + enrollment: + hosts: + profile: + label: + + tls: + certfiles: + client: + certfile: + keyfile: + +############################################################################# +# CA configuration section +# +# Configure the number of incorrect password attempts are allowed for +# identities. 
By default, the value of 'passwordattempts' is 10, which +# means that 10 incorrect password attempts can be made before an identity get +# locked out. +############################################################################# +cfg: + identities: + passwordattempts: 10 + +############################################################################### +# +# Operations section +# +############################################################################### +operations: + # host and port for the operations server + listenAddress: 0.0.0.0:9443 + + # TLS configuration for the operations endpoint + tls: + # TLS enabled + enabled: true + + # path to PEM encoded server certificate for the operations server + cert: + file: + + # path to PEM encoded server key for the operations server + key: + file: + + # require client certificate authentication to access all resources + clientAuthRequired: false + + # paths to PEM encoded ca certificates to trust for client authentication + clientRootCAs: + files: [] + +############################################################################### +# +# Metrics section +# +############################################################################### +metrics: + # statsd, prometheus, or disabled + provider: prometheus + + # statsd configuration + statsd: + # network type: tcp or udp + network: udp + + # statsd server address + address: 127.0.0.1:8125 + + # the interval at which locally cached counters and gauges are pushsed + # to statsd; timings are pushed immediately + writeInterval: 10s + + # prefix is prepended to all emitted statsd merics + prefix: server diff --git a/defaultconfig/ca/tlsca.yaml b/defaultconfig/ca/tlsca.yaml new file mode 100644 index 00000000..d27ff682 --- /dev/null +++ b/defaultconfig/ca/tlsca.yaml @@ -0,0 +1,500 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +############################################################################# +# This is a configuration file for the fabric-ca-server command. +# +# COMMAND LINE ARGUMENTS AND ENVIRONMENT VARIABLES +# ------------------------------------------------ +# Each configuration element can be overridden via command line +# arguments or environment variables. The precedence for determining +# the value of each element is as follows: +# 1) command line argument +# Examples: +# a) --port 443 +# To set the listening port +# b) --ca.keyfile ../mykey.pem +# To set the "keyfile" element in the "ca" section below; +# note the '.' separator character. +# 2) environment variable +# Examples: +# a) FABRIC_CA_SERVER_PORT=443 +# To set the listening port +# b) FABRIC_CA_SERVER_CA_KEYFILE="../mykey.pem" +# To set the "keyfile" element in the "ca" section below; +# note the '_' separator character. +# 3) configuration file +# 4) default value (if there is one) +# All default values are shown beside each element below. 
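#   For instance, the same listening port could be set in any of these three ways;
#   per the precedence rules above, the flag wins over the environment variable,
#   which wins over this file:
#       fabric-ca-server start --port 7054
#       FABRIC_CA_SERVER_PORT=7054 fabric-ca-server start
#       port: 7054          (in this configuration file)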
+# +# FILE NAME ELEMENTS +# ------------------ +# The value of all fields whose name ends with "file" or "files" are +# name or names of other files. +# For example, see "tls.certfile" and "tls.clientauth.certfiles". +# The value of each of these fields can be a simple filename, a +# relative path, or an absolute path. If the value is not an +# absolute path, it is interpretted as being relative to the location +# of this configuration file. +# +############################################################################# + +# Server's listening port (default: 7054) +port: 7054 + +# Cross-Origin Resource Sharing (CORS) +cors: + enabled: false + origins: + - "*" + +# Enables debug logging (default: false) +debug: false + +# Size limit of an acceptable CRL in bytes (default: 512000) +crlsizelimit: 512000 + +############################################################################# +# TLS section for the server's listening port +# +# The following types are supported for client authentication: NoClientCert, +# RequestClientCert, RequireAnyClientCert, VerifyClientCertIfGiven, +# and RequireAndVerifyClientCert. +# +# Certfiles is a list of root certificate authorities that the server uses +# when verifying client certificates. +############################################################################# +tls: + # Enable TLS (default: false) + enabled: true + # TLS for the server's listening port + certfile: + keyfile: + clientauth: + type: noclientcert + certfiles: + +############################################################################# +# The CA section contains information related to the Certificate Authority +# including the name of the CA, which should be unique for all members +# of a blockchain network. It also includes the key and certificate files +# used when issuing enrollment certificates (ECerts) and transaction +# certificates (TCerts). +# The chainfile (if it exists) contains the certificate chain which +# should be trusted for this CA, where the 1st in the chain is always the +# root CA certificate. +############################################################################# +ca: + # Name of this CA + name: tlsca + # Key file (is only used to import a private key into BCCSP) + keyfile: + # Certificate file (default: ca-cert.pem) + certfile: + # Chain file + chainfile: + +############################################################################# +# The gencrl REST endpoint is used to generate a CRL that contains revoked +# certificates. This section contains configuration options that are used +# during gencrl request processing. +############################################################################# +crl: + # Specifies expiration for the generated CRL. The number of hours + # specified by this property is added to the UTC time, the resulting time + # is used to set the 'Next Update' date of the CRL. + expiry: 24h + +############################################################################# +# The registry section controls how the fabric-ca-server does two things: +# 1) authenticates enrollment requests which contain a username and password +# (also known as an enrollment ID and secret). +# 2) once authenticated, retrieves the identity's attribute names and +# values which the fabric-ca-server optionally puts into TCerts +# which it issues for transacting on the Hyperledger Fabric blockchain. +# These attributes are useful for making access control decisions in +# chaincode. 
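#  For example, a Go chaincode could gate an operation on one of the registrar
#  attributes defined below (a hedged sketch using the fabric-chaincode-go client
#  identity library; the error handling shown is illustrative only):
#
#      import "github.com/hyperledger/fabric-chaincode-go/pkg/cid"
#
#      // refuse the call unless the invoker's enrollment cert carries hf.Revoker=true
#      if err := cid.AssertAttributeValue(stub, "hf.Revoker", "true"); err != nil {
#          return err
#      }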
+# There are two main configuration options: +# 1) The fabric-ca-server is the registry. +# This is true if "ldap.enabled" in the ldap section below is false. +# 2) An LDAP server is the registry, in which case the fabric-ca-server +# calls the LDAP server to perform these tasks. +# This is true if "ldap.enabled" in the ldap section below is true, +# which means this "registry" section is ignored. +############################################################################# +registry: + # Maximum number of times a password/secret can be reused for enrollment + # (default: -1, which means there is no limit) + maxenrollments: -1 + + # Contains identity information which is used when LDAP is disabled + identities: + - name: admin + pass: adminpw + type: client + affiliation: "" + attrs: + hf.Registrar.Roles: "*" + hf.Registrar.DelegateRoles: "*" + hf.Revoker: true + hf.IntermediateCA: true + hf.GenCRL: true + hf.Registrar.Attributes: "*" + hf.AffiliationMgr: true + +############################################################################# +# Database section +# Supported types are: "sqlite3", "postgres", and "mysql". +# The datasource value depends on the type. +# If the type is "sqlite3", the datasource value is a file name to use +# as the database store. Since "sqlite3" is an embedded database, it +# may not be used if you want to run the fabric-ca-server in a cluster. +# To run the fabric-ca-server in a cluster, you must choose "postgres" +# or "mysql". +############################################################################# +db: + type: sqlite3 + datasource: fabric-ca-server.db + tls: + enabled: false + certfiles: + client: + certfile: + keyfile: + +############################################################################# +# LDAP section +# If LDAP is enabled, the fabric-ca-server calls LDAP to: +# 1) authenticate enrollment ID and secret (i.e. username and password) +# for enrollment requests; +# 2) To retrieve identity attributes +############################################################################# +ldap: + # Enables or disables the LDAP client (default: false) + # If this is set to true, the "registry" section is ignored. + enabled: false + # The URL of the LDAP server + url: ldap://:@:/ + # TLS configuration for the client connection to the LDAP server + tls: + certfiles: + client: + certfile: + keyfile: + # Attribute related configuration for mapping from LDAP entries to Fabric CA attributes + attribute: + # 'names' is an array of strings containing the LDAP attribute names which are + # requested from the LDAP server for an LDAP identity's entry + names: ['uid','member'] + # The 'converters' section is used to convert an LDAP entry to the value of + # a fabric CA attribute. + # For example, the following converts an LDAP 'uid' attribute + # whose value begins with 'revoker' to a fabric CA attribute + # named "hf.Revoker" with a value of "true" (because the boolean expression + # evaluates to true). + # converters: + # - name: hf.Revoker + # value: attr("uid") =~ "revoker*" + converters: + - name: + value: + # The 'maps' section contains named maps which may be referenced by the 'map' + # function in the 'converters' section to map LDAP responses to arbitrary values. + # For example, assume a user has an LDAP attribute named 'member' which has multiple + # values which are each a distinguished name (i.e. a DN). For simplicity, assume the + # values of the 'member' attribute are 'dn1', 'dn2', and 'dn3'. + # Further assume the following configuration. 
+ # converters: + # - name: hf.Registrar.Roles + # value: map(attr("member"),"groups") + # maps: + # groups: + # - name: dn1 + # value: peer + # - name: dn2 + # value: client + # The value of the user's 'hf.Registrar.Roles' attribute is then computed to be + # "peer,client,dn3". This is because the value of 'attr("member")' is + # "dn1,dn2,dn3", and the call to 'map' with a 2nd argument of + # "group" replaces "dn1" with "peer" and "dn2" with "client". + maps: + groups: + - name: + value: + +############################################################################# +# Affiliations section. Fabric CA server can be bootstrapped with the +# affiliations specified in this section. Affiliations are specified as maps. +# For example: +# businessunit1: +# department1: +# - team1 +# businessunit2: +# - department2 +# - department3 +# +# Affiliations are hierarchical in nature. In the above example, +# department1 (used as businessunit1.department1) is the child of businessunit1. +# team1 (used as businessunit1.department1.team1) is the child of department1. +# department2 (used as businessunit2.department2) and department3 (businessunit2.department3) +# are children of businessunit2. +# Note: Affiliations are case sensitive except for the non-leaf affiliations +# (like businessunit1, department1, businessunit2) that are specified in the configuration file, +# which are always stored in lower case. +############################################################################# +affiliations: + +############################################################################# +# Signing section +# +# The "default" subsection is used to sign enrollment certificates; +# the default expiration ("expiry" field) is "8760h", which is 1 year in hours. +# +# The "ca" profile subsection is used to sign intermediate CA certificates; +# the default expiration ("expiry" field) is "43800h" which is 5 years in hours. +# Note that "isca" is true, meaning that it issues a CA certificate. +# A maxpathlen of 0 means that the intermediate CA cannot issue other +# intermediate CA certificates, though it can still issue end entity certificates. +# (See RFC 5280, section 4.2.1.9) +# +# The "tls" profile subsection is used to sign TLS certificate requests; +# the default expiration ("expiry" field) is "8760h", which is 1 year in hours. +############################################################################# +signing: + default: + usage: + - signing + - key encipherment + - server auth + - client auth + - key agreement + expiry: 131400h + +########################################################################### +# Certificate Signing Request (CSR) section. +# This controls the creation of the root CA certificate. +# The expiration for the root CA certificate is configured with the +# "ca.expiry" field below, whose default value is "131400h" which is +# 15 years in hours. +# The pathlength field is used to limit CA certificate hierarchy as described +# in section 4.2.1.9 of RFC 5280. +# Examples: +# 1) No pathlength value means no limit is requested. +# 2) pathlength == 1 means a limit of 1 is requested which is the default for +# a root CA. This means the root CA can issue intermediate CA certificates, +# but these intermediate CAs may not in turn issue other CA certificates +# though they can still issue end entity certificates. 
+# 3) pathlength == 0 means a limit of 0 is requested; +# this is the default for an intermediate CA, which means it can not issue +# CA certificates though it can still issue end entity certificates. +########################################################################### +csr: + cn: tlsca + keyrequest: + algo: ecdsa + size: 256 + names: + - C: US + ST: "North Carolina" + L: + O: Hyperledger + OU: Fabric + hosts: + - localhost + ca: + expiry: 131400h + pathlength: 1 + +########################################################################### +# Each CA can issue both X509 enrollment certificate as well as Idemix +# Credential. This section specifies configuration for the issuer component +# that is responsible for issuing Idemix credentials. +########################################################################### +idemix: + # Specifies pool size for revocation handles. A revocation handle is an unique identifier of an + # Idemix credential. The issuer will create a pool revocation handles of this specified size. When + # a credential is requested, issuer will get handle from the pool and assign it to the credential. + # Issuer will repopulate the pool with new handles when the last handle in the pool is used. + # A revocation handle and credential revocation information (CRI) are used to create non revocation proof + # by the prover to prove to the verifier that her credential is not revoked. + rhpoolsize: 1000 + + # The Idemix credential issuance is a two step process. First step is to get a nonce from the issuer + # and second step is send credential request that is constructed using the nonce to the isuser to + # request a credential. This configuration property specifies expiration for the nonces. By default is + # nonces expire after 15 seconds. The value is expressed in the time.Duration format (see https://golang.org/pkg/time/#ParseDuration). + nonceexpiration: 15s + + # Specifies interval at which expired nonces are removed from datastore. Default value is 15 minutes. + # The value is expressed in the time.Duration format (see https://golang.org/pkg/time/#ParseDuration) + noncesweepinterval: 15m + +############################################################################# +# BCCSP (BlockChain Crypto Service Provider) section is used to select which +# crypto library implementation to use +############################################################################# +bccsp: + default: SW + sw: + hash: SHA2 + security: 256 + filekeystore: + # The directory used for the software file-based keystore + keystore: msp/keystore + +############################################################################# +# Multi CA section +# +# Each Fabric CA server contains one CA by default. This section is used +# to configure multiple CAs in a single server. +# +# 1) --cacount +# Automatically generate non-default CAs. The names of these +# additional CAs are "ca1", "ca2", ... "caN", where "N" is +# This is particularly useful in a development environment to quickly set up +# multiple CAs. Note that, this config option is not applicable to intermediate CA server +# i.e., Fabric CA server that is started with intermediate.parentserver.url config +# option (-u command line option) +# +# 2) --cafiles +# For each CA config file in the list, generate a separate signing CA. Each CA +# config file in this list MAY contain all of the same elements as are found in +# the server config file except port, debug, and tls sections. 
+# +# Examples: +# fabric-ca-server start -b admin:adminpw --cacount 2 +# +# fabric-ca-server start -b admin:adminpw --cafiles ca/ca1/fabric-ca-server-config.yaml +# --cafiles ca/ca2/fabric-ca-server-config.yaml +# +############################################################################# + +cacount: + +cafiles: + +############################################################################# +# Intermediate CA section +# +# The relationship between servers and CAs is as follows: +# 1) A single server process may contain or function as one or more CAs. +# This is configured by the "Multi CA section" above. +# 2) Each CA is either a root CA or an intermediate CA. +# 3) Each intermediate CA has a parent CA which is either a root CA or another intermediate CA. +# +# This section pertains to configuration of #2 and #3. +# If the "intermediate.parentserver.url" property is set, +# then this is an intermediate CA with the specified parent +# CA. +# +# parentserver section +# url - The URL of the parent server +# caname - Name of the CA to enroll within the server +# +# enrollment section used to enroll intermediate CA with parent CA +# profile - Name of the signing profile to use in issuing the certificate +# label - Label to use in HSM operations +# +# tls section for secure socket connection +# certfiles - PEM-encoded list of trusted root certificate files +# client: +# certfile - PEM-encoded certificate file for when client authentication +# is enabled on server +# keyfile - PEM-encoded key file for when client authentication +# is enabled on server +############################################################################# +intermediate: + parentserver: + url: + caname: + + enrollment: + hosts: + profile: + label: + + tls: + certfiles: + client: + certfile: + keyfile: + +############################################################################# +# CA configuration section +# +# Configure the number of incorrect password attempts are allowed for +# identities. By default, the value of 'passwordattempts' is 10, which +# means that 10 incorrect password attempts can be made before an identity get +# locked out. 
+############################################################################# +cfg: + identities: + passwordattempts: 10 + +############################################################################### +# +# Operations section +# +############################################################################### +operations: + # host and port for the operations server + listenAddress: 0.0.0.0:9443 + + # TLS configuration for the operations endpoint + tls: + # TLS enabled + enabled: true + + # path to PEM encoded server certificate for the operations server + cert: + file: + + # path to PEM encoded server key for the operations server + key: + file: + + # require client certificate authentication to access all resources + clientAuthRequired: false + + # paths to PEM encoded ca certificates to trust for client authentication + clientRootCAs: + files: [] + +############################################################################### +# +# Metrics section +# +############################################################################### +metrics: + # statsd, prometheus, or disabled + provider: prometheus + + # statsd configuration + statsd: + # network type: tcp or udp + network: udp + + # statsd server address + address: 127.0.0.1:8125 + + # the interval at which locally cached counters and gauges are pushsed + # to statsd; timings are pushed immediately + writeInterval: 10s + + # prefix is prepended to all emitted statsd merics + prefix: server diff --git a/defaultconfig/console/console.go b/defaultconfig/console/console.go new file mode 100644 index 00000000..35603952 --- /dev/null +++ b/defaultconfig/console/console.go @@ -0,0 +1,36 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package console + +import "github.com/IBM-Blockchain/fabric-operator/pkg/apis/deployer" + +func GetImages() *deployer.ConsoleImages { + return &deployer.ConsoleImages{ + ConsoleImage: "ghcr.io/hyperledger-labs/fabric-console", + ConsoleTag: "latest", + ConsoleInitImage: "registry.access.redhat.com/ubi8/ubi-minimal", + ConsoleInitTag: "latest", + ConfigtxlatorImage: "hyperledger/fabric-tools", + ConfigtxlatorTag: "2.2.5", + DeployerImage: "ghcr.io/ibm-blockchain/fabric-deployer", + DeployerTag: "latest", + CouchDBImage: "couchdb", + CouchDBTag: "3.1.2", + } +} diff --git a/defaultconfig/orderer/configtx.yaml b/defaultconfig/orderer/configtx.yaml new file mode 100644 index 00000000..f62c2f82 --- /dev/null +++ b/defaultconfig/orderer/configtx.yaml @@ -0,0 +1,240 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +Capabilities: + # Channel capabilities apply to both the orderers and the peers and must be + # supported by both. + # Set the value of the capability to true to require it. + Channel: &ChannelCapabilities + V1_4_3: true + V1_3: false + V1_1: false + + # Orderer capabilities apply only to the orderers, and may be safely + # used with prior release peers. + # Set the value of the capability to true to require it. + Orderer: &OrdererCapabilities + V1_4_2: true + V1_1: false + + # Application capabilities apply only to the peer network, and may be safely + # used with prior release orderers. + # Set the value of the capability to true to require it. + Application: &ApplicationCapabilities + # V1.4.2 for Application enables the new non-backwards compatible + # features and fixes of fabric v1.4.2 + V1_4_2: true + # V1.3 for Application enables the new non-backwards compatible + # features and fixes of fabric v1.3. + V1_3: false + # V1.2 for Application enables the new non-backwards compatible + # features and fixes of fabric v1.2 (note, this need not be set if + # later version capabilities are set) + V1_2: false + # V1.1 for Application enables the new non-backwards compatible + # features and fixes of fabric v1.1 (note, this need not be set if + # later version capabilities are set). + V1_1: false + +################################################################################ +# +# ORGANIZATIONS +# +# This section defines the organizational identities that can be referenced +# in the configuration profiles. +# +################################################################################ +Organizations: +################################################################################ +# +# ORDERER +# +# This section defines the values to encode into a config transaction or +# genesis block for orderer related parameters. +# + # - ibpv2-test-cluster.us-south.containers.appdomain.cloud:32356 + # - Host: ibpv2-test-cluster.us-south.containers.appdomain.cloud + # Port: 32356 + # ClientTLSCert: /certs/tls/cert.pem + # ServerTLSCert: /certs/tls/cert.pem +################################################################################ +Orderer: &OrdererDefaults + + OrdererType: etcdraft + Addresses: + BatchTimeout: 2s + BatchSize: + + MaxChannels: 0 + Kafka: + Brokers: + + EtcdRaft: + Consenters: + Options: + + Organizations: + + Policies: + + # Capabilities describes the orderer level capabilities, see the + # dedicated Capabilities section elsewhere in this file for a full + # description + Capabilities: + <<: *OrdererCapabilities + +################################################################################ +# +# APPLICATION +# +# This section defines the values to encode into a config transaction or +# genesis block for application-related parameters. +# +################################################################################ +Application: &ApplicationDefaults + ACLs: &ACLsDefault + # This section provides defaults for policies for various resources + # in the system. 
These "resources" could be functions on system chaincodes + # (e.g., "GetBlockByNumber" on the "qscc" system chaincode) or other resources + # (e.g.,who can receive Block events). This section does NOT specify the resource's + # definition or API, but just the ACL policy for it. + # + # User's can override these defaults with their own policy mapping by defining the + # mapping under ACLs in their channel definition + + #---Lifecycle System Chaincode (lscc) function to policy mapping for access control---# + + # ACL policy for lscc's "getid" function + lscc/ChaincodeExists: /Channel/Application/Readers + + # ACL policy for lscc's "getdepspec" function + lscc/GetDeploymentSpec: /Channel/Application/Readers + + # ACL policy for lscc's "getccdata" function + lscc/GetChaincodeData: /Channel/Application/Readers + + # ACL Policy for lscc's "getchaincodes" function + lscc/GetInstantiatedChaincodes: /Channel/Application/Readers + + #---Query System Chaincode (qscc) function to policy mapping for access control---# + + # ACL policy for qscc's "GetChainInfo" function + qscc/GetChainInfo: /Channel/Application/Readers + + # ACL policy for qscc's "GetBlockByNumber" function + qscc/GetBlockByNumber: /Channel/Application/Readers + + # ACL policy for qscc's "GetBlockByHash" function + qscc/GetBlockByHash: /Channel/Application/Readers + + # ACL policy for qscc's "GetTransactionByID" function + qscc/GetTransactionByID: /Channel/Application/Readers + + # ACL policy for qscc's "GetBlockByTxID" function + qscc/GetBlockByTxID: /Channel/Application/Readers + + #---Configuration System Chaincode (cscc) function to policy mapping for access control---# + + # ACL policy for cscc's "GetConfigBlock" function + cscc/GetConfigBlock: /Channel/Application/Readers + + # ACL policy for cscc's "GetConfigTree" function + cscc/GetConfigTree: /Channel/Application/Readers + + # ACL policy for cscc's "SimulateConfigTreeUpdate" function + cscc/SimulateConfigTreeUpdate: /Channel/Application/Readers + + #---Miscellanesous peer function to policy mapping for access control---# + + # ACL policy for invoking chaincodes on peer + peer/Propose: /Channel/Application/Writers + + # ACL policy for chaincode to chaincode invocation + peer/ChaincodeToChaincode: /Channel/Application/Readers + + #---Events resource to policy mapping for access control###---# + + # ACL policy for sending block events + event/Block: /Channel/Application/Readers + + # ACL policy for sending filtered block events + event/FilteredBlock: /Channel/Application/Readers + + # Organizations lists the orgs participating on the application side of the + # network. + Organizations: + + # Policies defines the set of policies at this level of the config tree + # For Application policies, their canonical path is + # /Channel/Application/ + Policies: &ApplicationDefaultPolicies + + # Capabilities describes the application level capabilities, see the + # dedicated Capabilities section elsewhere in this file for a full + # description + Capabilities: + <<: *ApplicationCapabilities + +################################################################################ +# +# CHANNEL +# +# This section defines the values to encode into a config transaction or +# genesis block for channel related parameters. 
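+#
+# The Policies key under ChannelDefaults below is left empty in this default.
+# Purely as an illustration (following the upstream Fabric sample
+# configtx.yaml conventions, not a value shipped in this file), a filled-in
+# channel-level Policies block typically looks like:
+#
+#   Policies:
+#     Readers:
+#       Type: ImplicitMeta
+#       Rule: "ANY Readers"
+#     Writers:
+#       Type: ImplicitMeta
+#       Rule: "ANY Writers"
+#     Admins:
+#       Type: ImplicitMeta
+#       Rule: "MAJORITY Admins"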
+# +################################################################################ +Channel: &ChannelDefaults + # Policies defines the set of policies at this level of the config tree + # For Channel policies, their canonical path is + # /Channel/ + Policies: + + + # Capabilities describes the channel level capabilities, see the + # dedicated Capabilities section elsewhere in this file for a full + # description + Capabilities: + <<: *ChannelCapabilities + +################################################################################ +# +# PROFILES +# +# Different configuration profiles may be encoded here to be specified as +# parameters to the configtxgen tool. The profiles which specify consortiums +# are to be used for generating the orderer genesis block. With the correct +# consortium members defined in the orderer genesis block, channel creation +# requests may be generated with only the org member names and a consortium +# name. +# +################################################################################ +Profiles: + Initial: + <<: *ChannelDefaults + Orderer: + <<: *OrdererDefaults + OrdererType: etcdraft + Organizations: + + Consortiums: + Channel: + <<: *ChannelDefaults + Consortium: SampleConsortium + Application: + <<: *ApplicationDefaults + Organizations: diff --git a/defaultconfig/orderer/orderer.yaml b/defaultconfig/orderer/orderer.yaml new file mode 100644 index 00000000..1c22dd84 --- /dev/null +++ b/defaultconfig/orderer/orderer.yaml @@ -0,0 +1,402 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +################################################################################ +# +# Orderer Configuration +# +# - This controls the type and configuration of the orderer. +# +################################################################################ +General: + + # Ledger Type: The ledger type to provide to the orderer. + # Two non-production ledger types are provided for test purposes only: + # - ram: An in-memory ledger whose contents are lost on restart. + # - json: A simple file ledger that writes blocks to disk in JSON format. + # Only one production ledger type is provided: + # - file: A production file-based ledger. + LedgerType: file + + # Listen address: The IP on which to bind to listen. + ListenAddress: 127.0.0.1 + + # Listen port: The port on which to bind to listen. + ListenPort: 7050 + + # TLS: TLS settings for the GRPC server. + TLS: + Enabled: false + # PrivateKey governs the file location of the private key of the TLS certificate. + PrivateKey: tls/server.key + # Certificate governs the file location of the server TLS certificate. + Certificate: tls/server.crt + RootCAs: + - tls/ca.crt + ClientAuthRequired: false + ClientRootCAs: + # Keepalive settings for the GRPC server. + Keepalive: + # ServerMinInterval is the minimum permitted time between client pings. 
+ # If clients send pings more frequently, the server will + # disconnect them. + ServerMinInterval: 60s + # ServerInterval is the time between pings to clients. + ServerInterval: 7200s + # ServerTimeout is the duration the server waits for a response from + # a client before closing the connection. + ServerTimeout: 20s + # Cluster settings for ordering service nodes that communicate with other ordering service nodes + # such as Raft based ordering service. + Cluster: + # SendBufferSize is the maximum number of messages in the egress buffer. + # Consensus messages are dropped if the buffer is full, and transaction + # messages are waiting for space to be freed. + SendBufferSize: 10 + # ClientCertificate governs the file location of the client TLS certificate + # used to establish mutual TLS connections with other ordering service nodes. + ClientCertificate: + # ClientPrivateKey governs the file location of the private key of the client TLS certificate. + ClientPrivateKey: + # The below 4 properties should be either set together, or be unset together. + # If they are set, then the orderer node uses a separate listener for intra-cluster + # communication. If they are unset, then the general orderer listener is used. + # This is useful if you want to use a different TLS server certificates on the + # client-facing and the intra-cluster listeners. + + # ListenPort defines the port on which the cluster listens to connections. + ListenPort: + # ListenAddress defines the IP on which to listen to intra-cluster communication. + ListenAddress: + # ServerCertificate defines the file location of the server TLS certificate used for intra-cluster + # communication. + ServerCertificate: + # ServerPrivateKey defines the file location of the private key of the TLS certificate. + ServerPrivateKey: + # Genesis method: The method by which the genesis block for the orderer + # system channel is specified. Available options are "provisional", "file": + # - provisional: Utilizes a genesis profile, specified by GenesisProfile, + # to dynamically generate a new genesis block. + # - file: Uses the file provided by GenesisFile as the genesis block. + GenesisMethod: provisional + + # Genesis profile: The profile to use to dynamically generate the genesis + # block to use when initializing the orderer system channel and + # GenesisMethod is set to "provisional". See the configtx.yaml file for the + # descriptions of the available profiles. Ignored if GenesisMethod is set to + # "file". + GenesisProfile: SampleInsecureSolo + + # Genesis file: The file containing the genesis block to use when + # initializing the orderer system channel and GenesisMethod is set to + # "file". Ignored if GenesisMethod is set to "provisional". + GenesisFile: genesisblock + + # LocalMSPDir is where to find the private crypto material needed by the + # orderer. It is set relative here as a default for dev environments but + # should be changed to the real location in production. + LocalMSPDir: msp + + # LocalMSPID is the identity to register the local MSP material with the MSP + # manager. IMPORTANT: The local MSP ID of an orderer needs to match the MSP + # ID of one of the organizations defined in the orderer system channel's + # /Channel/Orderer configuration. The sample organization defined in the + # sample configuration provided has an MSP ID of "SampleOrg". 
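+    # As a purely illustrative example (the MSP ID below is hypothetical and
+    # not part of this default), an orderer enrolled under an organization
+    # whose MSP ID is "OrdererOrgMSP" would set:
+    #
+    #   LocalMSPID: OrdererOrgMSP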
+ LocalMSPID: SampleOrg + + # Enable an HTTP service for Go "pprof" profiling as documented at: + # https://golang.org/pkg/net/http/pprof + Profile: + Enabled: false + Address: 0.0.0.0:6060 + + # BCCSP configures the blockchain crypto service providers. + BCCSP: + # Default specifies the preferred blockchain crypto service provider + # to use. If the preferred provider is not available, the software + # based provider ("SW") will be used. + # Valid providers are: + # - SW: a software based crypto provider + # - PKCS11: a CA hardware security module crypto provider. + Default: SW + + # SW configures the software based blockchain crypto provider. + SW: + # TODO: The default Hash and Security level needs refactoring to be + # fully configurable. Changing these defaults requires coordination + # SHA2 is hardcoded in several places, not only BCCSP + Hash: SHA2 + Security: 256 + # Location of key store. If this is unset, a location will be + # chosen using: 'LocalMSPDir'/keystore + FileKeyStore: + KeyStore: + + # # Settings for the PKCS#11 crypto provider (i.e. when DEFAULT: PKCS11) + # PKCS11: + # # Location of the PKCS11 module library + # Library: + # # Token Label + # Label: + # # User PIN + # Pin: + # Hash: + # Security: + # FileKeyStore: + # KeyStore: + + # Authentication contains configuration parameters related to authenticating + # client messages + Authentication: + # the acceptable difference between the current server time and the + # client's time as specified in a client request message + TimeWindow: 15m + + +################################################################################ +# +# SECTION: File Ledger +# +# - This section applies to the configuration of the file or json ledgers. +# +################################################################################ +FileLedger: + + # Location: The directory to store the blocks in. + # NOTE: If this is unset, a new temporary location will be chosen every time + # the orderer is restarted, using the prefix specified by Prefix. + Location: /var/hyperledger/production/orderer + + # The prefix to use when generating a ledger directory in temporary space. + # Otherwise, this value is ignored. + Prefix: hyperledger-fabric-ordererledger + +################################################################################ +# +# SECTION: RAM Ledger +# +# - This section applies to the configuration of the RAM ledger. +# +################################################################################ +RAMLedger: + + # History Size: The number of blocks that the RAM ledger is set to retain. + # WARNING: Appending a block to the ledger might cause the oldest block in + # the ledger to be dropped in order to limit the number total number blocks + # to HistorySize. For example, if history size is 10, when appending block + # 10, block 0 (the genesis block!) will be dropped to make room for block 10. + HistorySize: 1000 + +################################################################################ +# +# SECTION: Kafka +# +# - This section applies to the configuration of the Kafka-based orderer, and +# its interaction with the Kafka cluster. +# +################################################################################ +Kafka: + + # Retry: What do if a connection to the Kafka cluster cannot be established, + # or if a metadata request to the Kafka cluster needs to be repeated. 
+ Retry: + # When a new channel is created, or when an existing channel is reloaded + # (in case of a just-restarted orderer), the orderer interacts with the + # Kafka cluster in the following ways: + # 1. It creates a Kafka producer (writer) for the Kafka partition that + # corresponds to the channel. + # 2. It uses that producer to post a no-op CONNECT message to that + # partition + # 3. It creates a Kafka consumer (reader) for that partition. + # If any of these steps fail, they will be re-attempted every + # for a total of , and then every + # for a total of until they succeed. + # Note that the orderer will be unable to write to or read from a + # channel until all of the steps above have been completed successfully. + ShortInterval: 5s + ShortTotal: 10m + LongInterval: 5m + LongTotal: 12h + # Affects the socket timeouts when waiting for an initial connection, a + # response, or a transmission. See Config.Net for more info: + # https://godoc.org/github.com/Shopify/sarama#Config + NetworkTimeouts: + DialTimeout: 10s + ReadTimeout: 10s + WriteTimeout: 10s + # Affects the metadata requests when the Kafka cluster is in the middle + # of a leader election.See Config.Metadata for more info: + # https://godoc.org/github.com/Shopify/sarama#Config + Metadata: + RetryBackoff: 250ms + RetryMax: 3 + # What to do if posting a message to the Kafka cluster fails. See + # Config.Producer for more info: + # https://godoc.org/github.com/Shopify/sarama#Config + Producer: + RetryBackoff: 100ms + RetryMax: 3 + # What to do if reading from the Kafka cluster fails. See + # Config.Consumer for more info: + # https://godoc.org/github.com/Shopify/sarama#Config + Consumer: + RetryBackoff: 2s + # Settings to use when creating Kafka topics. Only applies when + # Kafka.Version is v0.10.1.0 or higher + Topic: + # The number of Kafka brokers across which to replicate the topic + ReplicationFactor: 3 + # Verbose: Enable logging for interactions with the Kafka cluster. + Verbose: false + + # TLS: TLS settings for the orderer's connection to the Kafka cluster. + TLS: + + # Enabled: Use TLS when connecting to the Kafka cluster. + Enabled: false + + # PrivateKey: PEM-encoded private key the orderer will use for + # authentication. + PrivateKey: + # As an alternative to specifying the PrivateKey here, uncomment the + # following "File" key and specify the file name from which to load the + # value of PrivateKey. + #File: path/to/PrivateKey + + # Certificate: PEM-encoded signed public key certificate the orderer will + # use for authentication. + Certificate: + # As an alternative to specifying the Certificate here, uncomment the + # following "File" key and specify the file name from which to load the + # value of Certificate. + #File: path/to/Certificate + + # RootCAs: PEM-encoded trusted root certificates used to validate + # certificates from the Kafka cluster. + RootCAs: + # As an alternative to specifying the RootCAs here, uncomment the + # following "File" key and specify the file name from which to load the + # value of RootCAs. 
+ #File: path/to/RootCAs + + # SASLPlain: Settings for using SASL/PLAIN authentication with Kafka brokers + SASLPlain: + # Enabled: Use SASL/PLAIN to authenticate with Kafka brokers + Enabled: false + # User: Required when Enabled is set to true + User: + # Password: Required when Enabled is set to true + Password: + + # Kafka protocol version used to communicate with the Kafka cluster brokers + # (defaults to 0.10.2.0 if not specified) + Version: + +################################################################################ +# +# Debug Configuration +# +# - This controls the debugging options for the orderer +# +################################################################################ +Debug: + + # BroadcastTraceDir when set will cause each request to the Broadcast service + # for this orderer to be written to a file in this directory + BroadcastTraceDir: + + # DeliverTraceDir when set will cause each request to the Deliver service + # for this orderer to be written to a file in this directory + DeliverTraceDir: + +################################################################################ +# +# Operations Configuration +# +# - This configures the operations server endpoint for the orderer +# +################################################################################ +Operations: + # host and port for the operations server + ListenAddress: 127.0.0.1:8443 + + # TLS configuration for the operations endpoint + TLS: + # TLS enabled + Enabled: false + + # Certificate is the location of the PEM encoded TLS certificate + Certificate: + + # PrivateKey points to the location of the PEM-encoded key + PrivateKey: + + # Most operations service endpoints require client authentication when TLS + # is enabled. ClientAuthRequired requires client certificate authentication + # at the TLS layer to access all resources. + ClientAuthRequired: false + + # Paths to PEM encoded ca certificates to trust for client authentication + ClientRootCAs: [] + +################################################################################ +# +# Metrics Configuration +# +# - This configures metrics collection for the orderer +# +################################################################################ +Metrics: + # The metrics provider is one of statsd, prometheus, or disabled + Provider: prometheus + + # The statsd configuration + Statsd: + # network type: tcp or udp + Network: udp + + # the statsd server address + Address: 127.0.0.1:8125 + + # The interval at which locally cached counters and gauges are pushed + # to statsd; timings are pushed immediately + WriteInterval: 30s + + # The prefix is prepended to all emitted statsd metrics + Prefix: + +################################################################################ +# +# Consensus Configuration +# +# - This section contains config options for a consensus plugin. It is opaque +# to orderer, and completely up to consensus implementation to make use of. +# +################################################################################ +Consensus: + # The allowed key-value pairs here depend on consensus plugin. For etcd/raft, + # we use following options: + + # WALDir specifies the location at which Write Ahead Logs for etcd/raft are + # stored. Each channel will have its own subdir named after channel ID. + WALDir: /var/hyperledger/production/orderer/etcdraft/wal + + # SnapDir specifies the location at which snapshots for etcd/raft are + # stored. Each channel will have its own subdir named after channel ID. 
+ SnapDir: /var/hyperledger/production/orderer/etcdraft/snapshot diff --git a/defaultconfig/orderer/ouconfig-inter.yaml b/defaultconfig/orderer/ouconfig-inter.yaml new file mode 100644 index 00000000..20b2023a --- /dev/null +++ b/defaultconfig/orderer/ouconfig-inter.yaml @@ -0,0 +1,33 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +NodeOUs: + Enable: true + ClientOUIdentifier: + Certificate: intermediatecerts/intercert-0.pem + OrganizationalUnitIdentifier: client + PeerOUIdentifier: + Certificate: intermediatecerts/intercert-0.pem + OrganizationalUnitIdentifier: peer + AdminOUIdentifier: + Certificate: intermediatecerts/intercert-0.pem + OrganizationalUnitIdentifier: admin + OrdererOUIdentifier: + Certificate: intermediatecerts/intercert-0.pem + OrganizationalUnitIdentifier: orderer diff --git a/defaultconfig/orderer/ouconfig.yaml b/defaultconfig/orderer/ouconfig.yaml new file mode 100644 index 00000000..f2c00043 --- /dev/null +++ b/defaultconfig/orderer/ouconfig.yaml @@ -0,0 +1,33 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +NodeOUs: + Enable: true + ClientOUIdentifier: + Certificate: cacerts/cacert-0.pem + OrganizationalUnitIdentifier: client + PeerOUIdentifier: + Certificate: cacerts/cacert-0.pem + OrganizationalUnitIdentifier: peer + AdminOUIdentifier: + Certificate: cacerts/cacert-0.pem + OrganizationalUnitIdentifier: admin + OrdererOUIdentifier: + Certificate: cacerts/cacert-0.pem + OrganizationalUnitIdentifier: orderer diff --git a/defaultconfig/orderer/v2/orderer.yaml b/defaultconfig/orderer/v2/orderer.yaml new file mode 100644 index 00000000..b4963637 --- /dev/null +++ b/defaultconfig/orderer/v2/orderer.yaml @@ -0,0 +1,374 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +################################################################################ +# +# Orderer Configuration +# +# - This controls the type and configuration of the orderer. +# +################################################################################ +General: + # Listen address: The IP on which to bind to listen. + ListenAddress: 127.0.0.1 + + # Listen port: The port on which to bind to listen. + ListenPort: 7050 + + # TLS: TLS settings for the GRPC server. + TLS: + Enabled: false + # PrivateKey governs the file location of the private key of the TLS certificate. + PrivateKey: tls/server.key + # Certificate governs the file location of the server TLS certificate. + Certificate: tls/server.crt + RootCAs: + - tls/ca.crt + ClientAuthRequired: false + ClientRootCAs: + # Keepalive settings for the GRPC server. + Keepalive: + # ServerMinInterval is the minimum permitted time between client pings. + # If clients send pings more frequently, the server will + # disconnect them. + ServerMinInterval: 60s + # ServerInterval is the time between pings to clients. + ServerInterval: 7200s + # ServerTimeout is the duration the server waits for a response from + # a client before closing the connection. + ServerTimeout: 20s + # Cluster settings for ordering service nodes that communicate with other ordering service nodes + # such as Raft based ordering service. + Cluster: + # SendBufferSize is the maximum number of messages in the egress buffer. + # Consensus messages are dropped if the buffer is full, and transaction + # messages are waiting for space to be freed. + SendBufferSize: 10 + # ClientCertificate governs the file location of the client TLS certificate + # used to establish mutual TLS connections with other ordering service nodes. + ClientCertificate: + # ClientPrivateKey governs the file location of the private key of the client TLS certificate. + ClientPrivateKey: + # The below 4 properties should be either set together, or be unset together. + # If they are set, then the orderer node uses a separate listener for intra-cluster + # communication. If they are unset, then the general orderer listener is used. + # This is useful if you want to use a different TLS server certificates on the + # client-facing and the intra-cluster listeners. + + # ListenPort defines the port on which the cluster listens to connections. + ListenPort: + # ListenAddress defines the IP on which to listen to intra-cluster communication. + ListenAddress: + # ServerCertificate defines the file location of the server TLS certificate used for intra-cluster + # communication. + ServerCertificate: + # ServerPrivateKey defines the file location of the private key of the TLS certificate. + ServerPrivateKey: + + # Bootstrap method: The method by which to obtain the bootstrap block + # system channel is specified. The option can be one of: + # "file" - path to a file containing the genesis block or config block of system channel + # "none" - allows an orderer to start without a system channel configuration + BootstrapMethod: file + + # Bootstrap file: The file containing the bootstrap block to use when + # initializing the orderer system channel and BootstrapMethod is set to + # "file". The bootstrap file can be the genesis block, and it can also be + # a config block for late bootstrap of some consensus methods like Raft. 
+ # Generate a genesis block by updating $FABRIC_CFG_PATH/configtx.yaml and + # using configtxgen command with "-outputBlock" option. + # Defaults to file "genesisblock" (in $FABRIC_CFG_PATH directory) if not specified. + BootstrapFile: + + # LocalMSPDir is where to find the private crypto material needed by the + # orderer. It is set relative here as a default for dev environments but + # should be changed to the real location in production. + LocalMSPDir: msp + + # LocalMSPID is the identity to register the local MSP material with the MSP + # manager. IMPORTANT: The local MSP ID of an orderer needs to match the MSP + # ID of one of the organizations defined in the orderer system channel's + # /Channel/Orderer configuration. The sample organization defined in the + # sample configuration provided has an MSP ID of "SampleOrg". + LocalMSPID: SampleOrg + + # Enable an HTTP service for Go "pprof" profiling as documented at: + # https://golang.org/pkg/net/http/pprof + Profile: + Enabled: false + Address: 0.0.0.0:6060 + + # BCCSP configures the blockchain crypto service providers. + BCCSP: + # Default specifies the preferred blockchain crypto service provider + # to use. If the preferred provider is not available, the software + # based provider ("SW") will be used. + # Valid providers are: + # - SW: a software based crypto provider + # - PKCS11: a CA hardware security module crypto provider. + Default: SW + + # SW configures the software based blockchain crypto provider. + SW: + # TODO: The default Hash and Security level needs refactoring to be + # fully configurable. Changing these defaults requires coordination + # SHA2 is hardcoded in several places, not only BCCSP + Hash: SHA2 + Security: 256 + # Location of key store. If this is unset, a location will be + # chosen using: 'LocalMSPDir'/keystore + FileKeyStore: + KeyStore: + + # Settings for the PKCS#11 crypto provider (i.e. when DEFAULT: PKCS11) + # PKCS11: + # # Location of the PKCS11 module library + # Library: + # # Token Label + # Label: + # # User PIN + # Pin: + # Hash: + # Security: + # FileKeyStore: + # KeyStore: + + # Authentication contains configuration parameters related to authenticating + # client messages + Authentication: + # the acceptable difference between the current server time and the + # client's time as specified in a client request message + TimeWindow: 15m + + +################################################################################ +# +# SECTION: File Ledger +# +# - This section applies to the configuration of the file or json ledgers. +# +################################################################################ +FileLedger: + + # Location: The directory to store the blocks in. + # NOTE: If this is unset, a new temporary location will be chosen every time + # the orderer is restarted, using the prefix specified by Prefix. + Location: /var/hyperledger/production/orderer + + # The prefix to use when generating a ledger directory in temporary space. + # Otherwise, this value is ignored. + Prefix: hyperledger-fabric-ordererledger + +################################################################################ +# +# SECTION: Kafka +# +# - This section applies to the configuration of the Kafka-based orderer, and +# its interaction with the Kafka cluster. +# +################################################################################ +Kafka: + + # Retry: What do if a connection to the Kafka cluster cannot be established, + # or if a metadata request to the Kafka cluster needs to be repeated. 
+ Retry: + # When a new channel is created, or when an existing channel is reloaded + # (in case of a just-restarted orderer), the orderer interacts with the + # Kafka cluster in the following ways: + # 1. It creates a Kafka producer (writer) for the Kafka partition that + # corresponds to the channel. + # 2. It uses that producer to post a no-op CONNECT message to that + # partition + # 3. It creates a Kafka consumer (reader) for that partition. + # If any of these steps fail, they will be re-attempted every + # for a total of , and then every + # for a total of until they succeed. + # Note that the orderer will be unable to write to or read from a + # channel until all of the steps above have been completed successfully. + ShortInterval: 5s + ShortTotal: 10m + LongInterval: 5m + LongTotal: 12h + # Affects the socket timeouts when waiting for an initial connection, a + # response, or a transmission. See Config.Net for more info: + # https://godoc.org/github.com/Shopify/sarama#Config + NetworkTimeouts: + DialTimeout: 10s + ReadTimeout: 10s + WriteTimeout: 10s + # Affects the metadata requests when the Kafka cluster is in the middle + # of a leader election.See Config.Metadata for more info: + # https://godoc.org/github.com/Shopify/sarama#Config + Metadata: + RetryBackoff: 250ms + RetryMax: 3 + # What to do if posting a message to the Kafka cluster fails. See + # Config.Producer for more info: + # https://godoc.org/github.com/Shopify/sarama#Config + Producer: + RetryBackoff: 100ms + RetryMax: 3 + # What to do if reading from the Kafka cluster fails. See + # Config.Consumer for more info: + # https://godoc.org/github.com/Shopify/sarama#Config + Consumer: + RetryBackoff: 2s + # Settings to use when creating Kafka topics. Only applies when + # Kafka.Version is v0.10.1.0 or higher + Topic: + # The number of Kafka brokers across which to replicate the topic + ReplicationFactor: 3 + # Verbose: Enable logging for interactions with the Kafka cluster. + Verbose: false + + # TLS: TLS settings for the orderer's connection to the Kafka cluster. + TLS: + + # Enabled: Use TLS when connecting to the Kafka cluster. + Enabled: false + + # PrivateKey: PEM-encoded private key the orderer will use for + # authentication. + PrivateKey: + # As an alternative to specifying the PrivateKey here, uncomment the + # following "File" key and specify the file name from which to load the + # value of PrivateKey. + #File: path/to/PrivateKey + + # Certificate: PEM-encoded signed public key certificate the orderer will + # use for authentication. + Certificate: + # As an alternative to specifying the Certificate here, uncomment the + # following "File" key and specify the file name from which to load the + # value of Certificate. + #File: path/to/Certificate + + # RootCAs: PEM-encoded trusted root certificates used to validate + # certificates from the Kafka cluster. + RootCAs: + # As an alternative to specifying the RootCAs here, uncomment the + # following "File" key and specify the file name from which to load the + # value of RootCAs. 
+ #File: path/to/RootCAs + + # SASLPlain: Settings for using SASL/PLAIN authentication with Kafka brokers + SASLPlain: + # Enabled: Use SASL/PLAIN to authenticate with Kafka brokers + Enabled: false + # User: Required when Enabled is set to true + User: + # Password: Required when Enabled is set to true + Password: + + # Kafka protocol version used to communicate with the Kafka cluster brokers + # (defaults to 0.10.2.0 if not specified) + Version: + +################################################################################ +# +# Debug Configuration +# +# - This controls the debugging options for the orderer +# +################################################################################ +Debug: + + # BroadcastTraceDir when set will cause each request to the Broadcast service + # for this orderer to be written to a file in this directory + BroadcastTraceDir: + + # DeliverTraceDir when set will cause each request to the Deliver service + # for this orderer to be written to a file in this directory + DeliverTraceDir: + +################################################################################ +# +# Operations Configuration +# +# - This configures the operations server endpoint for the orderer +# +################################################################################ +Operations: + # host and port for the operations server + ListenAddress: 127.0.0.1:8443 + + # TLS configuration for the operations endpoint + TLS: + # TLS enabled + Enabled: false + + # Certificate is the location of the PEM encoded TLS certificate + Certificate: + + # PrivateKey points to the location of the PEM-encoded key + PrivateKey: + + # Most operations service endpoints require client authentication when TLS + # is enabled. ClientAuthRequired requires client certificate authentication + # at the TLS layer to access all resources. + ClientAuthRequired: false + + # Paths to PEM encoded ca certificates to trust for client authentication + ClientRootCAs: [] + +################################################################################ +# +# Metrics Configuration +# +# - This configures metrics collection for the orderer +# +################################################################################ +Metrics: + # The metrics provider is one of statsd, prometheus, or disabled + Provider: prometheus + + # The statsd configuration + Statsd: + # network type: tcp or udp + Network: udp + + # the statsd server address + Address: 127.0.0.1:8125 + + # The interval at which locally cached counters and gauges are pushed + # to statsd; timings are pushed immediately + WriteInterval: 30s + + # The prefix is prepended to all emitted statsd metrics + Prefix: + + +################################################################################ +# +# Consensus Configuration +# +# - This section contains config options for a consensus plugin. It is opaque +# to orderer, and completely up to consensus implementation to make use of. +# +################################################################################ +Consensus: + # The allowed key-value pairs here depend on consensus plugin. For etcd/raft, + # we use following options: + + # WALDir specifies the location at which Write Ahead Logs for etcd/raft are + # stored. Each channel will have its own subdir named after channel ID. + WALDir: /var/hyperledger/production/orderer/etcdraft/wal + + # SnapDir specifies the location at which snapshots for etcd/raft are + # stored. Each channel will have its own subdir named after channel ID. 
+ SnapDir: /var/hyperledger/production/orderer/etcdraft/snapshot diff --git a/defaultconfig/orderer/v24/orderer.yaml b/defaultconfig/orderer/v24/orderer.yaml new file mode 100644 index 00000000..72cd86b9 --- /dev/null +++ b/defaultconfig/orderer/v24/orderer.yaml @@ -0,0 +1,420 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +################################################################################ +# +# Orderer Configuration +# +# - This controls the type and configuration of the orderer. +# +################################################################################ +General: + # Listen address: The IP on which to bind to listen. + ListenAddress: 127.0.0.1 + + # Listen port: The port on which to bind to listen. + ListenPort: 7050 + + # TLS: TLS settings for the GRPC server. + TLS: + Enabled: false + # PrivateKey governs the file location of the private key of the TLS certificate. + PrivateKey: tls/server.key + # Certificate governs the file location of the server TLS certificate. + Certificate: tls/server.crt + RootCAs: + - tls/ca.crt + ClientAuthRequired: false + ClientRootCAs: + # Keepalive settings for the GRPC server. + Keepalive: + # ServerMinInterval is the minimum permitted time between client pings. + # If clients send pings more frequently, the server will + # disconnect them. + ServerMinInterval: 60s + # ServerInterval is the time between pings to clients. + ServerInterval: 7200s + # ServerTimeout is the duration the server waits for a response from + # a client before closing the connection. + ServerTimeout: 20s + # Cluster settings for ordering service nodes that communicate with other ordering service nodes + # such as Raft based ordering service. + Cluster: + # SendBufferSize is the maximum number of messages in the egress buffer. + # Consensus messages are dropped if the buffer is full, and transaction + # messages are waiting for space to be freed. + SendBufferSize: 10 + # ClientCertificate governs the file location of the client TLS certificate + # used to establish mutual TLS connections with other ordering service nodes. + ClientCertificate: + # ClientPrivateKey governs the file location of the private key of the client TLS certificate. + ClientPrivateKey: + # The below 4 properties should be either set together, or be unset together. + # If they are set, then the orderer node uses a separate listener for intra-cluster + # communication. If they are unset, then the general orderer listener is used. + # This is useful if you want to use a different TLS server certificates on the + # client-facing and the intra-cluster listeners. + + # ListenPort defines the port on which the cluster listens to connections. + ListenPort: + # ListenAddress defines the IP on which to listen to intra-cluster communication. + ListenAddress: + # ServerCertificate defines the file location of the server TLS certificate used for intra-cluster + # communication. 
+ ServerCertificate: + # ServerPrivateKey defines the file location of the private key of the TLS certificate. + ServerPrivateKey: + + # Bootstrap method: The method by which to obtain the bootstrap block + # system channel is specified. The option can be one of: + # "file" - path to a file containing the genesis block or config block of system channel + # "none" - allows an orderer to start without a system channel configuration + BootstrapMethod: file + + # Bootstrap file: The file containing the bootstrap block to use when + # initializing the orderer system channel and BootstrapMethod is set to + # "file". The bootstrap file can be the genesis block, and it can also be + # a config block for late bootstrap of some consensus methods like Raft. + # Generate a genesis block by updating $FABRIC_CFG_PATH/configtx.yaml and + # using configtxgen command with "-outputBlock" option. + # Defaults to file "genesisblock" (in $FABRIC_CFG_PATH directory) if not specified. + BootstrapFile: + + # LocalMSPDir is where to find the private crypto material needed by the + # orderer. It is set relative here as a default for dev environments but + # should be changed to the real location in production. + LocalMSPDir: msp + + # LocalMSPID is the identity to register the local MSP material with the MSP + # manager. IMPORTANT: The local MSP ID of an orderer needs to match the MSP + # ID of one of the organizations defined in the orderer system channel's + # /Channel/Orderer configuration. The sample organization defined in the + # sample configuration provided has an MSP ID of "SampleOrg". + LocalMSPID: SampleOrg + + # Enable an HTTP service for Go "pprof" profiling as documented at: + # https://golang.org/pkg/net/http/pprof + Profile: + Enabled: false + Address: 0.0.0.0:6060 + + # BCCSP configures the blockchain crypto service providers. + BCCSP: + # Default specifies the preferred blockchain crypto service provider + # to use. If the preferred provider is not available, the software + # based provider ("SW") will be used. + # Valid providers are: + # - SW: a software based crypto provider + # - PKCS11: a CA hardware security module crypto provider. + Default: SW + + # SW configures the software based blockchain crypto provider. + SW: + # TODO: The default Hash and Security level needs refactoring to be + # fully configurable. Changing these defaults requires coordination + # SHA2 is hardcoded in several places, not only BCCSP + Hash: SHA2 + Security: 256 + # Location of key store. If this is unset, a location will be + # chosen using: 'LocalMSPDir'/keystore + FileKeyStore: + KeyStore: + + # Settings for the PKCS#11 crypto provider (i.e. when DEFAULT: PKCS11) + # PKCS11: + # # Location of the PKCS11 module library + # Library: + # # Token Label + # Label: + # # User PIN + # Pin: + # Hash: + # Security: + # FileKeyStore: + # KeyStore: + + # Authentication contains configuration parameters related to authenticating + # client messages + Authentication: + # the acceptable difference between the current server time and the + # client's time as specified in a client request message + TimeWindow: 15m + + +################################################################################ +# +# SECTION: File Ledger +# +# - This section applies to the configuration of the file or json ledgers. +# +################################################################################ +FileLedger: + + # Location: The directory to store the blocks in. 
+ # NOTE: If this is unset, a new temporary location will be chosen every time + # the orderer is restarted, using the prefix specified by Prefix. + Location: /var/hyperledger/production/orderer + +################################################################################ +# +# SECTION: Kafka +# +# - This section applies to the configuration of the Kafka-based orderer, and +# its interaction with the Kafka cluster. +# +################################################################################ +Kafka: + + # Retry: What do if a connection to the Kafka cluster cannot be established, + # or if a metadata request to the Kafka cluster needs to be repeated. + Retry: + # When a new channel is created, or when an existing channel is reloaded + # (in case of a just-restarted orderer), the orderer interacts with the + # Kafka cluster in the following ways: + # 1. It creates a Kafka producer (writer) for the Kafka partition that + # corresponds to the channel. + # 2. It uses that producer to post a no-op CONNECT message to that + # partition + # 3. It creates a Kafka consumer (reader) for that partition. + # If any of these steps fail, they will be re-attempted every + # for a total of , and then every + # for a total of until they succeed. + # Note that the orderer will be unable to write to or read from a + # channel until all of the steps above have been completed successfully. + ShortInterval: 5s + ShortTotal: 10m + LongInterval: 5m + LongTotal: 12h + # Affects the socket timeouts when waiting for an initial connection, a + # response, or a transmission. See Config.Net for more info: + # https://godoc.org/github.com/Shopify/sarama#Config + NetworkTimeouts: + DialTimeout: 10s + ReadTimeout: 10s + WriteTimeout: 10s + # Affects the metadata requests when the Kafka cluster is in the middle + # of a leader election.See Config.Metadata for more info: + # https://godoc.org/github.com/Shopify/sarama#Config + Metadata: + RetryBackoff: 250ms + RetryMax: 3 + # What to do if posting a message to the Kafka cluster fails. See + # Config.Producer for more info: + # https://godoc.org/github.com/Shopify/sarama#Config + Producer: + RetryBackoff: 100ms + RetryMax: 3 + # What to do if reading from the Kafka cluster fails. See + # Config.Consumer for more info: + # https://godoc.org/github.com/Shopify/sarama#Config + Consumer: + RetryBackoff: 2s + # Settings to use when creating Kafka topics. Only applies when + # Kafka.Version is v0.10.1.0 or higher + Topic: + # The number of Kafka brokers across which to replicate the topic + ReplicationFactor: 3 + # Verbose: Enable logging for interactions with the Kafka cluster. + Verbose: false + + # TLS: TLS settings for the orderer's connection to the Kafka cluster. + TLS: + + # Enabled: Use TLS when connecting to the Kafka cluster. + Enabled: false + + # PrivateKey: PEM-encoded private key the orderer will use for + # authentication. + PrivateKey: + # As an alternative to specifying the PrivateKey here, uncomment the + # following "File" key and specify the file name from which to load the + # value of PrivateKey. + #File: path/to/PrivateKey + + # Certificate: PEM-encoded signed public key certificate the orderer will + # use for authentication. + Certificate: + # As an alternative to specifying the Certificate here, uncomment the + # following "File" key and specify the file name from which to load the + # value of Certificate. 
+ #File: path/to/Certificate + + # RootCAs: PEM-encoded trusted root certificates used to validate + # certificates from the Kafka cluster. + RootCAs: + # As an alternative to specifying the RootCAs here, uncomment the + # following "File" key and specify the file name from which to load the + # value of RootCAs. + #File: path/to/RootCAs + + # SASLPlain: Settings for using SASL/PLAIN authentication with Kafka brokers + SASLPlain: + # Enabled: Use SASL/PLAIN to authenticate with Kafka brokers + Enabled: false + # User: Required when Enabled is set to true + User: + # Password: Required when Enabled is set to true + Password: + + # Kafka protocol version used to communicate with the Kafka cluster brokers + # (defaults to 0.10.2.0 if not specified) + Version: + +################################################################################ +# +# Debug Configuration +# +# - This controls the debugging options for the orderer +# +################################################################################ +Debug: + + # BroadcastTraceDir when set will cause each request to the Broadcast service + # for this orderer to be written to a file in this directory + BroadcastTraceDir: + + # DeliverTraceDir when set will cause each request to the Deliver service + # for this orderer to be written to a file in this directory + DeliverTraceDir: + +################################################################################ +# +# Operations Configuration +# +# - This configures the operations server endpoint for the orderer +# +################################################################################ +Operations: + # host and port for the operations server + ListenAddress: 127.0.0.1:8443 + + # TLS configuration for the operations endpoint + TLS: + # TLS enabled + Enabled: false + + # Certificate is the location of the PEM encoded TLS certificate + Certificate: + + # PrivateKey points to the location of the PEM-encoded key + PrivateKey: + + # Most operations service endpoints require client authentication when TLS + # is enabled. ClientAuthRequired requires client certificate authentication + # at the TLS layer to access all resources. 
+ ClientAuthRequired: false + + # Paths to PEM encoded ca certificates to trust for client authentication + ClientRootCAs: [] + +################################################################################ +# +# Metrics Configuration +# +# - This configures metrics collection for the orderer +# +################################################################################ +Metrics: + # The metrics provider is one of statsd, prometheus, or disabled + Provider: prometheus + + # The statsd configuration + Statsd: + # network type: tcp or udp + Network: udp + + # the statsd server address + Address: 127.0.0.1:8125 + + # The interval at which locally cached counters and gauges are pushed + # to statsd; timings are pushed immediately + WriteInterval: 30s + + # The prefix is prepended to all emitted statsd metrics + Prefix: + +################################################################################ +# +# Admin Configuration +# +# - This configures the admin server endpoint for the orderer +# +################################################################################ +Admin: + # host and port for the admin server + ListenAddress: 127.0.0.1:9443 + + # TLS configuration for the admin endpoint + TLS: + # TLS enabled + Enabled: false + + # Certificate is the location of the PEM encoded TLS certificate + Certificate: + + # PrivateKey points to the location of the PEM-encoded key + PrivateKey: + + # Most admin service endpoints require client authentication when TLS + # is enabled. ClientAuthRequired requires client certificate authentication + # at the TLS layer to access all resources. + # + # NOTE: When TLS is enabled, the admin endpoint requires mutual TLS. The + # orderer will panic on startup if this value is set to false. + ClientAuthRequired: true + + # Paths to PEM encoded ca certificates to trust for client authentication + ClientRootCAs: [] + +################################################################################ +# +# Channel participation API Configuration +# +# - This provides the channel participation API configuration for the orderer. +# - Channel participation uses the ListenAddress and TLS settings of the Admin +# service. +# +################################################################################ +ChannelParticipation: + # Channel participation API is enabled. + # ibp updates this to enabled by default + Enabled: true + + # The maximum size of the request body when joining a channel. + MaxRequestBodySize: 1048576 + + +################################################################################ +# +# Consensus Configuration +# +# - This section contains config options for a consensus plugin. It is opaque +# to orderer, and completely up to consensus implementation to make use of. +# +################################################################################ +Consensus: + # The allowed key-value pairs here depend on consensus plugin. For etcd/raft, + # we use following options: + + # WALDir specifies the location at which Write Ahead Logs for etcd/raft are + # stored. Each channel will have its own subdir named after channel ID. + WALDir: /var/hyperledger/production/orderer/etcdraft/wal + + # SnapDir specifies the location at which snapshots for etcd/raft are + # stored. Each channel will have its own subdir named after channel ID. 
+ SnapDir: /var/hyperledger/production/orderer/etcdraft/snapshot \ No newline at end of file diff --git a/defaultconfig/peer/core.yaml b/defaultconfig/peer/core.yaml new file mode 100644 index 00000000..7a0b771d --- /dev/null +++ b/defaultconfig/peer/core.yaml @@ -0,0 +1,728 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +############################################################################### +# +# Peer section +# +############################################################################### +peer: + + # The Peer id is used for identifying this Peer instance. + id: jdoe + + # The networkId allows for logical seperation of networks + networkId: dev + + # The Address at local network interface this Peer will listen on. + # By default, it will listen on all network interfaces + listenAddress: 0.0.0.0:7051 + + # The endpoint this peer uses to listen for inbound chaincode connections. + # If this is commented-out, the listen address is selected to be + # the peer's address (see below) with port 7052 + # chaincodeListenAddress: 0.0.0.0:7052 + + # The endpoint the chaincode for this peer uses to connect to the peer. + # If this is not specified, the chaincodeListenAddress address is selected. + # And if chaincodeListenAddress is not specified, address is selected from + # peer listenAddress. + # chaincodeAddress: 0.0.0.0:7052 + + # When used as peer config, this represents the endpoint to other peers + # in the same organization. For peers in other organization, see + # gossip.externalEndpoint for more info. + # When used as CLI config, this means the peer's endpoint to interact with + address: 0.0.0.0:7051 + + # Whether the Peer should programmatically determine its address + # This case is useful for docker containers. + addressAutoDetect: false + + # Setting for runtime.GOMAXPROCS(n). If n < 1, it does not change the + # current setting + gomaxprocs: -1 + + # Keepalive settings for peer server and clients + keepalive: + # MinInterval is the minimum permitted time between client pings. + # If clients send pings more frequently, the peer server will + # disconnect them + minInterval: 60s + # Client keepalive settings for communicating with other peer nodes + client: + # Interval is the time between pings to peer nodes. This must + # greater than or equal to the minInterval specified by peer + # nodes + interval: 60s + # Timeout is the duration the client waits for a response from + # peer nodes before closing the connection + timeout: 20s + # DeliveryClient keepalive settings for communication with ordering + # nodes. + deliveryClient: + # Interval is the time between pings to ordering nodes. This must + # greater than or equal to the minInterval specified by ordering + # nodes. 
+ interval: 60s + # Timeout is the duration the client waits for a response from + # ordering nodes before closing the connection + timeout: 20s + + + # Gossip related configuration + gossip: + # Bootstrap set to initialize gossip with. + # This is a list of other peers that this peer reaches out to at startup. + # Important: The endpoints here have to be endpoints of peers in the same + # organization, because the peer would refuse connecting to these endpoints + # unless they are in the same organization as the peer. + bootstrap: + - 127.0.0.1:7051 + + # NOTE: orgLeader and useLeaderElection parameters are mutual exclusive. + # Setting both to true would result in the termination of the peer + # since this is undefined state. If the peers are configured with + # useLeaderElection=false, make sure there is at least 1 peer in the + # organization that its orgLeader is set to true. + + # Defines whenever peer will initialize dynamic algorithm for + # "leader" selection, where leader is the peer to establish + # connection with ordering service and use delivery protocol + # to pull ledger blocks from ordering service. It is recommended to + # use leader election for large networks of peers. + # ibp changes this from true to false + useLeaderElection: false + # Statically defines peer to be an organization "leader", + # where this means that current peer will maintain connection + # with ordering service and disseminate block across peers in + # its own organization + # ibp changes this from false to true + orgLeader: true + + # Interval for membershipTracker polling + membershipTrackerInterval: 5s + + # Overrides the endpoint that the peer publishes to peers + # in its organization. For peers in foreign organizations + # see 'externalEndpoint' + endpoint: + # Maximum count of blocks stored in memory + # ibp updates it from 100 to 10 + maxBlockCountToStore: 10 + # Max time between consecutive message pushes(unit: millisecond) + maxPropagationBurstLatency: 10ms + # Max number of messages stored until a push is triggered to remote peers + maxPropagationBurstSize: 10 + # Number of times a message is pushed to remote peers + propagateIterations: 1 + # Number of peers selected to push messages to + propagatePeerNum: 3 + # Determines frequency of pull phases(unit: second) + # Must be greater than digestWaitTime + responseWaitTime + pullInterval: 4s + # Number of peers to pull from + pullPeerNum: 3 + # Determines frequency of pulling state info messages from peers(unit: second) + requestStateInfoInterval: 4s + # Determines frequency of pushing state info messages to peers(unit: second) + publishStateInfoInterval: 4s + # Maximum time a stateInfo message is kept until expired + stateInfoRetentionInterval: + # Time from startup certificates are included in Alive messages(unit: second) + publishCertPeriod: 10s + # Should we skip verifying block messages or not (currently not in use) + skipBlockVerification: false + # Dial timeout(unit: second) + dialTimeout: 3s + # Connection timeout(unit: second) + connTimeout: 2s + # Buffer size of received messages + recvBuffSize: 20 + # Buffer size of sending messages + sendBuffSize: 200 + # Time to wait before pull engine processes incoming digests (unit: second) + # Should be slightly smaller than requestWaitTime + digestWaitTime: 1s + # Time to wait before pull engine removes incoming nonce (unit: milliseconds) + # Should be slightly bigger than digestWaitTime + requestWaitTime: 1500ms + # Time to wait before pull engine ends pull (unit: second) + 
responseWaitTime: 2s + # Alive check interval(unit: second) + aliveTimeInterval: 5s + # Alive expiration timeout(unit: second) + aliveExpirationTimeout: 25s + # Reconnect interval(unit: second) + reconnectInterval: 25s + # Max number of attempts to connect to a peer + maxConnectionAttempts: 120 + # Message expiration factor for alive messages + msgExpirationFactor: 20 + # This is an endpoint that is published to peers outside of the organization. + # If this isn't set, the peer will not be known to other organizations. + externalEndpoint: + # Leader election service configuration + election: + # Longest time peer waits for stable membership during leader election startup (unit: second) + startupGracePeriod: 15s + # Interval gossip membership samples to check its stability (unit: second) + membershipSampleInterval: 1s + # Time passes since last declaration message before peer decides to perform leader election (unit: second) + leaderAliveThreshold: 10s + # Time between peer sends propose message and declares itself as a leader (sends declaration message) (unit: second) + leaderElectionDuration: 5s + + pvtData: + # pullRetryThreshold determines the maximum duration of time private data corresponding for a given block + # would be attempted to be pulled from peers until the block would be committed without the private data + # ibp updates this from 60s to 5s + pullRetryThreshold: 5s + # As private data enters the transient store, it is associated with the peer's ledger's height at that time. + # transientstoreMaxBlockRetention defines the maximum difference between the current ledger's height upon commit, + # and the private data residing inside the transient store that is guaranteed not to be purged. + # Private data is purged from the transient store when blocks with sequences that are multiples + # of transientstoreMaxBlockRetention are committed. + transientstoreMaxBlockRetention: 1000 + # pushAckTimeout is the maximum time to wait for an acknowledgement from each peer + # at private data push at endorsement time. + pushAckTimeout: 3s + # Block to live pulling margin, used as a buffer + # to prevent peer from trying to pull private data + # from peers that is soon to be purged in next N blocks. + # This helps a newly joined peer catch up to current + # blockchain height quicker. + btlPullMargin: 10 + # the process of reconciliation is done in an endless loop, while in each iteration reconciler tries to + # pull from the other peers the most recent missing blocks with a maximum batch size limitation. + # reconcileBatchSize determines the maximum batch size of missing private data that will be reconciled in a + # single iteration. + reconcileBatchSize: 10 + # reconcileSleepInterval determines the time reconciler sleeps from end of an iteration until the beginning + # of the next reconciliation iteration. + reconcileSleepInterval: 1m + # reconciliationEnabled is a flag that indicates whether private data reconciliation is enable or not. + reconciliationEnabled: true + # skipPullingInvalidTransactionsDuringCommit is a flag that indicates whether pulling of invalid + # transaction's private data from other peers need to be skipped during the commit time and pulled + # only through reconciler. + skipPullingInvalidTransactionsDuringCommit: false + + # Gossip state transfer related configuration + state: + # indicates whenever state transfer is enabled or not + # default value is true, i.e. 
state transfer is active + # and takes care to sync up missing blocks allowing + # lagging peer to catch up to speed with rest network + # ibp updates this from true to false + enabled: false + # checkInterval interval to check whether peer is lagging behind enough to + # request blocks via state transfer from another peer. + checkInterval: 10s + # responseTimeout amount of time to wait for state transfer response from + # other peers + responseTimeout: 3s + # batchSize the number of blocks to request via state transfer from another peer + batchSize: 10 + # blockBufferSize reflect the maximum distance between lowest and + # highest block sequence number state buffer to avoid holes. + # In order to ensure absence of the holes actual buffer size + # is twice of this distance + # ibp updates this from 100 to 20 + blockBufferSize: 20 + # maxRetries maximum number of re-tries to ask + # for single state transfer request + maxRetries: 3 + + # TLS Settings + # Note that peer-chaincode connections through chaincodeListenAddress is + # not mutual TLS auth. See comments on chaincodeListenAddress for more info + tls: + # Require server-side TLS + enabled: false + # Require client certificates / mutual TLS. + # Note that clients that are not configured to use a certificate will + # fail to connect to the peer. + clientAuthRequired: false + # X.509 certificate used for TLS server + cert: + file: tls/server.crt + # Private key used for TLS server (and client if clientAuthEnabled + # is set to true + key: + file: tls/server.key + # Trusted root certificate chain for tls.cert + rootcert: + file: tls/ca.crt + # Set of root certificate authorities used to verify client certificates + clientRootCAs: + files: + - tls/ca.crt + # Private key used for TLS when making client connections. If + # not set, peer.tls.key.file will be used instead + clientKey: + file: + # X.509 certificate used for TLS when making client connections. + # If not set, peer.tls.cert.file will be used instead + clientCert: + file: + + # Authentication contains configuration parameters related to authenticating + # client messages + authentication: + # the acceptable difference between the current server time and the + # client's time as specified in a client request message + timewindow: 15m + + # Path on the file system where peer will store data (eg ledger). This + # location must be access control protected to prevent unintended + # modification that might corrupt the peer operations. + fileSystemPath: /var/hyperledger/production + + # BCCSP (Blockchain crypto provider): Select which crypto implementation or + # library to use + BCCSP: + Default: SW + # Settings for the SW crypto provider (i.e. when DEFAULT: SW) + SW: + # TODO: The default Hash and Security level needs refactoring to be + # fully configurable. Changing these defaults requires coordination + # SHA2 is hardcoded in several places, not only BCCSP + Hash: SHA2 + Security: 256 + # Location of Key Store + FileKeyStore: + # If "", defaults to 'mspConfigPath'/keystore + KeyStore: + # Settings for the PKCS#11 crypto provider (i.e. 
when DEFAULT: PKCS11) + # PKCS11: + # # Location of the PKCS11 module library + # Library: + # # Token Label + # Label: + # # User PIN + # Pin: + # Hash: + # Security: + # FileKeyStore: + # KeyStore: + + # Path on the file system where peer will find MSP local configurations + mspConfigPath: msp + + # Identifier of the local MSP + # ----!!!!IMPORTANT!!!-!!!IMPORTANT!!!-!!!IMPORTANT!!!!---- + # Deployers need to change the value of the localMspId string. + # In particular, the name of the local MSP ID of a peer needs + # to match the name of one of the MSPs in each of the channel + # that this peer is a member of. Otherwise this peer's messages + # will not be identified as valid by other nodes. + localMspId: SampleOrg + + # CLI common client config options + client: + # connection timeout + connTimeout: 3s + + # Delivery service related config + deliveryclient: + # The total time to spend retrying connections to ordering nodes + # before giving up and returning an error. + reconnectTotalTimeThreshold: 3600s + + # The connection timeout when connecting to ordering service nodes. + connTimeout: 3s + + # The maximum delay between consecutive connection retry attempts to + # ordering nodes. + reConnectBackoffThreshold: 3600s + + # A list of orderer endpoint addresses which should be overridden + # when found in channel configurations. + addressOverrides: + # - from: + # to: + # caCertsFile: + # - from: + # to: + # caCertsFile: + + # Type for the local MSP - by default it's of type bccsp + localMspType: bccsp + + # Used with Go profiling tools only in none production environment. In + # production, it should be disabled (eg enabled: false) + profile: + enabled: false + listenAddress: 0.0.0.0:6060 + + # The admin service is used for administrative operations such as + # control over logger levels, etc. + # Only peer administrators can use the service. + adminService: + # The interface and port on which the admin server will listen on. + # If this is commented out, or the port number is equal to the port + # of the peer listen address - the admin service is attached to the + # peer's service (defaults to 7051). + #listenAddress: 0.0.0.0:7055 + + # Handlers defines custom handlers that can filter and mutate + # objects passing within the peer, such as: + # Auth filter - reject or forward proposals from clients + # Decorators - append or mutate the chaincode input passed to the chaincode + # Endorsers - Custom signing over proposal response payload and its mutation + # Valid handler definition contains: + # - A name which is a factory method name defined in + # core/handlers/library/library.go for statically compiled handlers + # - library path to shared object binary for pluggable filters + # Auth filters and decorators are chained and executed in the order that + # they are defined. For example: + # authFilters: + # - + # name: FilterOne + # library: /opt/lib/filter.so + # - + # name: FilterTwo + # decorators: + # - + # name: DecoratorOne + # - + # name: DecoratorTwo + # library: /opt/lib/decorator.so + # Endorsers are configured as a map that its keys are the endorsement system chaincodes that are being overridden. + # Below is an example that overrides the default ESCC and uses an endorsement plugin that has the same functionality + # as the default ESCC. + # If the 'library' property is missing, the name is used as the constructor method in the builtin library similar + # to auth filters and decorators. 
+ # endorsers: + # escc: + # name: DefaultESCC + # library: /etc/hyperledger/fabric/plugin/escc.so + handlers: + authFilters: + - + name: DefaultAuth + - + name: ExpirationCheck # This filter checks identity x509 certificate expiration + decorators: + - + name: DefaultDecorator + endorsers: + escc: + name: DefaultEndorsement + library: + validators: + vscc: + name: DefaultValidation + library: + + # library: /etc/hyperledger/fabric/plugin/escc.so + # Number of goroutines that will execute transaction validation in parallel. + # By default, the peer chooses the number of CPUs on the machine. Set this + # variable to override that choice. + # NOTE: overriding this value might negatively influence the performance of + # the peer so please change this value only if you know what you're doing + validatorPoolSize: + + # The discovery service is used by clients to query information about peers, + # such as - which peers have joined a certain channel, what is the latest + # channel config, and most importantly - given a chaincode and a channel, + # what possible sets of peers satisfy the endorsement policy. + discovery: + enabled: true + # Whether the authentication cache is enabled or not. + authCacheEnabled: true + # The maximum size of the cache, after which a purge takes place + authCacheMaxSize: 1000 + # The proportion (0 to 1) of entries that remain in the cache after the cache is purged due to overpopulation + authCachePurgeRetentionRatio: 0.75 + # Whether to allow non-admins to perform non channel scoped queries. + # When this is false, it means that only peer admins can perform non channel scoped queries. + orgMembersAllowedAccess: false +############################################################################### +# +# VM section +# +############################################################################### +vm: + + # Endpoint of the vm management system. For docker can be one of the following in general + # unix:///var/run/docker.sock + # http://localhost:2375 + # https://localhost:2376 + endpoint: unix:///var/run/docker.sock + + # settings for docker vms + docker: + tls: + enabled: false + ca: + file: docker/ca.crt + cert: + file: docker/tls.crt + key: + file: docker/tls.key + + # Enables/disables the standard out/err from chaincode containers for + # debugging purposes + attachStdout: false + + # Parameters on creating docker container. + # Container may be efficiently created using ipam & dns-server for cluster + # NetworkMode - sets the networking mode for the container. Supported + # standard values are: `host`(default),`bridge`,`ipvlan`,`none`. + # Dns - a list of DNS servers for the container to use. + # Note: `Privileged` `Binds` `Links` and `PortBindings` properties of + # Docker Host Config are not supported and will not be used if set. + # LogConfig - sets the logging driver (Type) and related options + # (Config) for Docker. For more info, + # https://docs.docker.com/engine/admin/logging/overview/ + # Note: Set LogConfig using Environment Variables is not supported. 
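+    # Note: the Memory value under hostConfig below is expressed in bytes, so
+    # 2147483648 corresponds to a 2 GiB limit for chaincode containers started
+    # through the Docker endpoint configured above.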
+ hostConfig: + NetworkMode: host + Dns: + # - 192.168.0.1 + # NEVER UNCOMMENT THIS + # LogConfig: + # Type: json-file + # Config: + # max-size: "50m" + # max-file: "5" + Memory: 2147483648 + +############################################################################### +# +# Chaincode section +# +############################################################################### +chaincode: + + # The id is used by the Chaincode stub to register the executing Chaincode + # ID with the Peer and is generally supplied through ENV variables + # the `path` form of ID is provided when installing the chaincode. + # The `name` is used for all other requests and can be any string. + id: + path: + name: + + # Generic builder environment, suitable for most chaincode types + builder: $(DOCKER_NS)/fabric-ccenv:$(TWO_DIGIT_VERSION) + + # Enables/disables force pulling of the base docker images (listed below) + # during user chaincode instantiation. + # Useful when using moving image tags (such as :latest) + pull: false + + golang: + # golang will never need more than baseos + runtime: $(BASE_DOCKER_NS)/fabric-baseos:$(ARCH)-$(BASE_VERSION) + + # whether or not golang chaincode should be linked dynamically + dynamicLink: false + + car: + # car may need more facilities (JVM, etc) in the future as the catalog + # of platforms are expanded. For now, we can just use baseos + runtime: $(BASE_DOCKER_NS)/fabric-baseos:$(ARCH)-$(BASE_VERSION) + + java: + # This is an image based on java:openjdk-8 with addition compiler + # tools added for java shim layer packaging. + # This image is packed with shim layer libraries that are necessary + # for Java chaincode runtime. + runtime: $(DOCKER_NS)/fabric-javaenv:$(TWO_DIGIT_VERSION) + + node: + # need node.js engine at runtime, currently available in baseimage + # but not in baseos + runtime: $(BASE_DOCKER_NS)/fabric-baseimage:$(ARCH)-$(BASE_VERSION) + + # Timeout duration for starting up a container and waiting for Register + # to come through. 1sec should be plenty for chaincode unit tests + startuptimeout: 300s + + # Timeout duration for Invoke and Init calls to prevent runaway. + # This timeout is used by all chaincodes in all the channels, including + # system chaincodes. + # Note that during Invoke, if the image is not available (e.g. being + # cleaned up when in development environment), the peer will automatically + # build the image, which might take more time. In production environment, + # the chaincode image is unlikely to be deleted, so the timeout could be + # reduced accordingly. + # ibp defaults changed from 30s to 60s + executetimeout: 60s + + # There are 2 modes: "dev" and "net". + # In dev mode, user runs the chaincode after starting peer from + # command line on local machine. + # In net mode, peer will run chaincode in a docker container. + mode: net + + # keepalive in seconds. In situations where the communiction goes through a + # proxy that does not support keep-alive, this parameter will maintain connection + # between peer and chaincode. + # A value <= 0 turns keepalive off + keepalive: 0 + + # system chaincodes whitelist. To add system chaincode "myscc" to the + # whitelist, add "myscc: enable" to the list below, and register in + # chaincode/importsysccs.go + system: + cscc: enable + lscc: enable + qscc: enable + + # System chaincode plugins: + # System chaincodes can be loaded as shared objects compiled as Go plugins. + # See examples/plugins/scc for an example. + # Plugins must be white listed in the chaincode.system section above. 
+ systemPlugins: + # example configuration: + # - enabled: true + # name: myscc + # path: /opt/lib/myscc.so + # invokableExternal: true + # invokableCC2CC: true + + # Logging section for the chaincode container + logging: + # Default level for all loggers within the chaincode container + level: info + # Override default level for the 'shim' logger + shim: warning + # Format for the chaincode container logs + format: '%{color}%{time:2006-01-02 15:04:05.000 MST} [%{module}] %{shortfunc} -> %{level:.4s} %{id:03x}%{color:reset} %{message}' + +############################################################################### +# +# Ledger section - ledger configuration encompases both the blockchain +# and the state +# +############################################################################### +ledger: + + blockchain: + + state: + # stateDatabase - options are "goleveldb", "CouchDB" + # goleveldb - default state database stored in goleveldb. + # CouchDB - store state database in CouchDB + stateDatabase: goleveldb + # Limit on the number of records to return per query + totalQueryLimit: 100000 + couchDBConfig: + # It is recommended to run CouchDB on the same server as the peer, and + # not map the CouchDB container port to a server port in docker-compose. + # Otherwise proper security must be provided on the connection between + # CouchDB client (on the peer) and server. + couchDBAddress: 127.0.0.1:5984 + # This username must have read and write authority on CouchDB + username: + # The password is recommended to pass as an environment variable + # during start up (eg CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD). + # If it is stored here, the file must be access control protected + # to prevent unintended users from discovering the password. + password: + # Number of retries for CouchDB errors + maxRetries: 3 + # Number of retries for CouchDB errors during peer startup + maxRetriesOnStartup: 12 + # CouchDB request timeout (unit: duration, e.g. 20s) + requestTimeout: 35s + # Limit on the number of records per each CouchDB query + # Note that chaincode queries are only bound by totalQueryLimit. + # Internally the chaincode may execute multiple CouchDB queries, + # each of size internalQueryLimit. + internalQueryLimit: 1000 + # Limit on the number of records per CouchDB bulk update batch + maxBatchUpdateSize: 1000 + # Warm indexes after every N blocks. + # This option warms any indexes that have been + # deployed to CouchDB after every N blocks. + # A value of 1 will warm indexes after every block commit, + # to ensure fast selector queries. + # Increasing the value may improve write efficiency of peer and CouchDB, + # but may degrade query response time. + warmIndexesAfterNBlocks: 1 + # Create the _global_changes system database + # This is optional. Creating the global changes database will require + # additional system resources to track changes and maintain the database + createGlobalChangesDB: false + + history: + # enableHistoryDatabase - options are true or false + # Indicates if the history of key updates should be stored. + # All history 'index' will be stored in goleveldb, regardless if using + # CouchDB or alternate database for the state. 
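+    # Note: chaincode APIs such as GetHistoryForKey rely on this database being
+    # enabled; turning it off avoids maintaining the extra index but removes
+    # key-history queries.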
+ enableHistoryDatabase: true + +############################################################################### +# +# Operations section +# +############################################################################### +operations: + # host and port for the operations server + listenAddress: 127.0.0.1:9443 + + # TLS configuration for the operations endpoint + tls: + # TLS enabled + enabled: false + + # path to PEM encoded server certificate for the operations server + cert: + file: + + # path to PEM encoded server key for the operations server + key: + file: + + # most operations service endpoints require client authentication when TLS + # is enabled. clientAuthRequired requires client certificate authentication + # at the TLS layer to access all resources. + clientAuthRequired: false + + # paths to PEM encoded ca certificates to trust for client authentication + clientRootCAs: + files: [] + +############################################################################### +# +# Metrics section +# +############################################################################### +metrics: + # metrics provider is one of statsd, prometheus, or disabled + # ibp updates this from default to prometheus + provider: prometheus + + # statsd configuration + statsd: + # network type: tcp or udp + network: udp + + # statsd server address + address: 127.0.0.1:8125 + + # the interval at which locally cached counters and gauges are pushed + # to statsd; timings are pushed immediately + writeInterval: 10s + + # prefix is prepended to all emitted statsd metrics + prefix: diff --git a/defaultconfig/peer/ouconfig-inter.yaml b/defaultconfig/peer/ouconfig-inter.yaml new file mode 100644 index 00000000..20b2023a --- /dev/null +++ b/defaultconfig/peer/ouconfig-inter.yaml @@ -0,0 +1,33 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +NodeOUs: + Enable: true + ClientOUIdentifier: + Certificate: intermediatecerts/intercert-0.pem + OrganizationalUnitIdentifier: client + PeerOUIdentifier: + Certificate: intermediatecerts/intercert-0.pem + OrganizationalUnitIdentifier: peer + AdminOUIdentifier: + Certificate: intermediatecerts/intercert-0.pem + OrganizationalUnitIdentifier: admin + OrdererOUIdentifier: + Certificate: intermediatecerts/intercert-0.pem + OrganizationalUnitIdentifier: orderer diff --git a/defaultconfig/peer/ouconfig.yaml b/defaultconfig/peer/ouconfig.yaml new file mode 100644 index 00000000..f2c00043 --- /dev/null +++ b/defaultconfig/peer/ouconfig.yaml @@ -0,0 +1,33 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +NodeOUs: + Enable: true + ClientOUIdentifier: + Certificate: cacerts/cacert-0.pem + OrganizationalUnitIdentifier: client + PeerOUIdentifier: + Certificate: cacerts/cacert-0.pem + OrganizationalUnitIdentifier: peer + AdminOUIdentifier: + Certificate: cacerts/cacert-0.pem + OrganizationalUnitIdentifier: admin + OrdererOUIdentifier: + Certificate: cacerts/cacert-0.pem + OrganizationalUnitIdentifier: orderer diff --git a/defaultconfig/peer/v2/core.yaml b/defaultconfig/peer/v2/core.yaml new file mode 100644 index 00000000..74438633 --- /dev/null +++ b/defaultconfig/peer/v2/core.yaml @@ -0,0 +1,799 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +############################################################################### +# +# Peer section +# +############################################################################### +peer: + + # The peer id provides a name for this peer instance and is used when + # naming docker resources. + id: jdoe + + # The networkId allows for logical separation of networks and is used when + # naming docker resources. + networkId: dev + + # The Address at local network interface this Peer will listen on. + # By default, it will listen on all network interfaces + listenAddress: 0.0.0.0:7051 + + # The endpoint this peer uses to listen for inbound chaincode connections. + # If this is commented-out, the listen address is selected to be + # the peer's address (see below) with port 7052 + # chaincodeListenAddress: 0.0.0.0:7052 + + # The endpoint the chaincode for this peer uses to connect to the peer. + # If this is not specified, the chaincodeListenAddress address is selected. + # And if chaincodeListenAddress is not specified, address is selected from + # peer address (see below). If specified peer address is invalid then it + # will fallback to the auto detected IP (local IP) regardless of the peer + # addressAutoDetect value. + # chaincodeAddress: 0.0.0.0:7052 + + # When used as peer config, this represents the endpoint to other peers + # in the same organization. For peers in other organization, see + # gossip.externalEndpoint for more info. + # When used as CLI config, this means the peer's endpoint to interact with + address: 0.0.0.0:7051 + + # Whether the Peer should programmatically determine its address + # This case is useful for docker containers. + # When set to true, will override peer address. + addressAutoDetect: false + + # Settings for the Peer's gateway server. 
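+    # (The embedded Fabric Gateway, introduced in Fabric v2.4, lets client
+    # applications submit and evaluate transactions through a single peer
+    # connection instead of contacting each endorsing peer directly.)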
+ gateway: + # Whether the gateway is enabled for this Peer. + enabled: true + # endorsementTimeout is the duration the gateway waits for a response + # from other endorsing peers before returning a timeout error to the client. + endorsementTimeout: 30s + # dialTimeout is the duration the gateway waits for a connection + # to other network nodes. + dialTimeout: 2m + + # Keepalive settings for peer server and clients + keepalive: + # Interval is the duration after which if the server does not see + # any activity from the client it pings the client to see if it's alive + interval: 7200s + # Timeout is the duration the server waits for a response + # from the client after sending a ping before closing the connection + timeout: 20s + # MinInterval is the minimum permitted time between client pings. + # If clients send pings more frequently, the peer server will + # disconnect them + minInterval: 60s + # Client keepalive settings for communicating with other peer nodes + client: + # Interval is the time between pings to peer nodes. This must + # greater than or equal to the minInterval specified by peer + # nodes + interval: 60s + # Timeout is the duration the client waits for a response from + # peer nodes before closing the connection + timeout: 20s + # DeliveryClient keepalive settings for communication with ordering + # nodes. + deliveryClient: + # Interval is the time between pings to ordering nodes. This must + # greater than or equal to the minInterval specified by ordering + # nodes. + interval: 60s + # Timeout is the duration the client waits for a response from + # ordering nodes before closing the connection + timeout: 20s + + + # Gossip related configuration + gossip: + # Bootstrap set to initialize gossip with. + # This is a list of other peers that this peer reaches out to at startup. + # Important: The endpoints here have to be endpoints of peers in the same + # organization, because the peer would refuse connecting to these endpoints + # unless they are in the same organization as the peer. + bootstrap: + - 127.0.0.1:7051 + + # NOTE: orgLeader and useLeaderElection parameters are mutual exclusive. + # Setting both to true would result in the termination of the peer + # since this is undefined state. If the peers are configured with + # useLeaderElection=false, make sure there is at least 1 peer in the + # organization that its orgLeader is set to true. + + # Defines whenever peer will initialize dynamic algorithm for + # "leader" selection, where leader is the peer to establish + # connection with ordering service and use delivery protocol + # to pull ledger blocks from ordering service. + useLeaderElection: false + # Statically defines peer to be an organization "leader", + # where this means that current peer will maintain connection + # with ordering service and disseminate block across peers in + # its own organization. Multiple peers or all peers in an organization + # may be configured as org leaders, so that they all pull + # blocks directly from ordering service. + orgLeader: true + + # Interval for membershipTracker polling + membershipTrackerInterval: 5s + + # Overrides the endpoint that the peer publishes to peers + # in its organization. 
For peers in foreign organizations + # see 'externalEndpoint' + endpoint: + # Maximum count of blocks stored in memory + maxBlockCountToStore: 10 + # Max time between consecutive message pushes(unit: millisecond) + maxPropagationBurstLatency: 10ms + # Max number of messages stored until a push is triggered to remote peers + maxPropagationBurstSize: 10 + # Number of times a message is pushed to remote peers + propagateIterations: 1 + # Number of peers selected to push messages to + propagatePeerNum: 3 + # Determines frequency of pull phases(unit: second) + # Must be greater than digestWaitTime + responseWaitTime + pullInterval: 4s + # Number of peers to pull from + pullPeerNum: 3 + # Determines frequency of pulling state info messages from peers(unit: second) + requestStateInfoInterval: 4s + # Determines frequency of pushing state info messages to peers(unit: second) + publishStateInfoInterval: 4s + # Maximum time a stateInfo message is kept until expired + stateInfoRetentionInterval: + # Time from startup certificates are included in Alive messages(unit: second) + publishCertPeriod: 10s + # Should we skip verifying block messages or not (currently not in use) + skipBlockVerification: false + # Dial timeout(unit: second) + dialTimeout: 3s + # Connection timeout(unit: second) + connTimeout: 2s + # Buffer size of received messages + recvBuffSize: 20 + # Buffer size of sending messages + sendBuffSize: 200 + # Time to wait before pull engine processes incoming digests (unit: second) + # Should be slightly smaller than requestWaitTime + digestWaitTime: 1s + # Time to wait before pull engine removes incoming nonce (unit: milliseconds) + # Should be slightly bigger than digestWaitTime + requestWaitTime: 1500ms + # Time to wait before pull engine ends pull (unit: second) + responseWaitTime: 2s + # Alive check interval(unit: second) + aliveTimeInterval: 5s + # Alive expiration timeout(unit: second) + aliveExpirationTimeout: 25s + # Reconnect interval(unit: second) + reconnectInterval: 25s + # Max number of attempts to connect to a peer + maxConnectionAttempts: 120 + # Message expiration factor for alive messages + msgExpirationFactor: 20 + # This is an endpoint that is published to peers outside of the organization. + # If this isn't set, the peer will not be known to other organizations. + externalEndpoint: + # Leader election service configuration + election: + # Longest time peer waits for stable membership during leader election startup (unit: second) + startupGracePeriod: 15s + # Interval gossip membership samples to check its stability (unit: second) + membershipSampleInterval: 1s + # Time passes since last declaration message before peer decides to perform leader election (unit: second) + leaderAliveThreshold: 10s + # Time between peer sends propose message and declares itself as a leader (sends declaration message) (unit: second) + leaderElectionDuration: 5s + + pvtData: + # pullRetryThreshold determines the maximum duration of time private data corresponding for a given block + # would be attempted to be pulled from peers until the block would be committed without the private data + # ibp updates this from 60s to 5s + pullRetryThreshold: 5s + # As private data enters the transient store, it is associated with the peer's ledger's height at that time. + # transientstoreMaxBlockRetention defines the maximum difference between the current ledger's height upon commit, + # and the private data residing inside the transient store that is guaranteed not to be purged. 
+ # Private data is purged from the transient store when blocks with sequences that are multiples + # of transientstoreMaxBlockRetention are committed. + transientstoreMaxBlockRetention: 1000 + # pushAckTimeout is the maximum time to wait for an acknowledgement from each peer + # at private data push at endorsement time. + pushAckTimeout: 3s + # Block to live pulling margin, used as a buffer + # to prevent peer from trying to pull private data + # from peers that is soon to be purged in next N blocks. + # This helps a newly joined peer catch up to current + # blockchain height quicker. + btlPullMargin: 10 + # the process of reconciliation is done in an endless loop, while in each iteration reconciler tries to + # pull from the other peers the most recent missing blocks with a maximum batch size limitation. + # reconcileBatchSize determines the maximum batch size of missing private data that will be reconciled in a + # single iteration. + reconcileBatchSize: 10 + # reconcileSleepInterval determines the time reconciler sleeps from end of an iteration until the beginning + # of the next reconciliation iteration. + reconcileSleepInterval: 1m + # reconciliationEnabled is a flag that indicates whether private data reconciliation is enable or not. + reconciliationEnabled: true + # skipPullingInvalidTransactionsDuringCommit is a flag that indicates whether pulling of invalid + # transaction's private data from other peers need to be skipped during the commit time and pulled + # only through reconciler. + skipPullingInvalidTransactionsDuringCommit: false + # implicitCollectionDisseminationPolicy specifies the dissemination policy for the peer's own implicit collection. + # When a peer endorses a proposal that writes to its own implicit collection, below values override the default values + # for disseminating private data. + # Note that it is applicable to all channels the peer has joined. The implication is that requiredPeerCount has to + # be smaller than the number of peers in a channel that has the lowest numbers of peers from the organization. + implicitCollectionDisseminationPolicy: + # requiredPeerCount defines the minimum number of eligible peers to which the peer must successfully + # disseminate private data for its own implicit collection during endorsement. Default value is 0. + requiredPeerCount: 0 + # maxPeerCount defines the maximum number of eligible peers to which the peer will attempt to + # disseminate private data for its own implicit collection during endorsement. Default value is 1. + maxPeerCount: 1 + + # Gossip state transfer related configuration + state: + # indicates whenever state transfer is enabled or not + # default value is true, i.e. state transfer is active + # and takes care to sync up missing blocks allowing + # lagging peer to catch up to speed with rest network + enabled: false + # checkInterval interval to check whether peer is lagging behind enough to + # request blocks via state transfer from another peer. + checkInterval: 10s + # responseTimeout amount of time to wait for state transfer response from + # other peers + responseTimeout: 3s + # batchSize the number of blocks to request via state transfer from another peer + batchSize: 10 + # blockBufferSize reflects the size of the re-ordering buffer + # which captures blocks and takes care to deliver them in order + # down to the ledger layer. 
The actual buffer size is bounded between + # 0 and 2*blockBufferSize, each channel maintains its own buffer + blockBufferSize: 20 + # maxRetries maximum number of re-tries to ask + # for single state transfer request + maxRetries: 3 + + # TLS Settings + tls: + # Require server-side TLS + enabled: false + # Require client certificates / mutual TLS. + # Note that clients that are not configured to use a certificate will + # fail to connect to the peer. + clientAuthRequired: false + # X.509 certificate used for TLS server + cert: + file: tls/server.crt + # Private key used for TLS server (and client if clientAuthEnabled + # is set to true + key: + file: tls/server.key + # Trusted root certificate chain for tls.cert + rootcert: + file: tls/ca.crt + # Set of root certificate authorities used to verify client certificates + clientRootCAs: + files: + - tls/ca.crt + # Private key used for TLS when making client connections. If + # not set, peer.tls.key.file will be used instead + clientKey: + file: + # X.509 certificate used for TLS when making client connections. + # If not set, peer.tls.cert.file will be used instead + clientCert: + file: + + # Authentication contains configuration parameters related to authenticating + # client messages + authentication: + # the acceptable difference between the current server time and the + # client's time as specified in a client request message + timewindow: 15m + + # Path on the file system where peer will store data (eg ledger). This + # location must be access control protected to prevent unintended + # modification that might corrupt the peer operations. + fileSystemPath: /var/hyperledger/production + + # BCCSP (Blockchain crypto provider): Select which crypto implementation or + # library to use + BCCSP: + Default: SW + # Settings for the SW crypto provider (i.e. when DEFAULT: SW) + SW: + # TODO: The default Hash and Security level needs refactoring to be + # fully configurable. Changing these defaults requires coordination + # SHA2 is hardcoded in several places, not only BCCSP + Hash: SHA2 + Security: 256 + # Location of Key Store + FileKeyStore: + # If "", defaults to 'mspConfigPath'/keystore + KeyStore: + # Settings for the PKCS#11 crypto provider (i.e. when DEFAULT: PKCS11) + # PKCS11: + # # Location of the PKCS11 module library + # Library: + # # Token Label + # Label: + # # User PIN + # Pin: + # Hash: + # Security: + + # Path on the file system where peer will find MSP local configurations + mspConfigPath: msp + + # Identifier of the local MSP + # ----!!!!IMPORTANT!!!-!!!IMPORTANT!!!-!!!IMPORTANT!!!!---- + # Deployers need to change the value of the localMspId string. + # In particular, the name of the local MSP ID of a peer needs + # to match the name of one of the MSPs in each of the channel + # that this peer is a member of. Otherwise this peer's messages + # will not be identified as valid by other nodes. 
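+    # For example (illustrative value only), a peer belonging to an organization
+    # whose MSP is defined in the channel configuration as "Org1MSP" would set:
+    #   localMspId: Org1MSP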
+ localMspId: SampleOrg + + # CLI common client config options + client: + # connection timeout + connTimeout: 3s + + # Delivery service related config + deliveryclient: + # It sets the total time the delivery service may spend in reconnection + # attempts until its retry logic gives up and returns an error + reconnectTotalTimeThreshold: 3600s + + # It sets the delivery service <-> ordering service node connection timeout + connTimeout: 3s + + # It sets the delivery service maximal delay between consecutive retries + reConnectBackoffThreshold: 3600s + + # A list of orderer endpoint addresses which should be overridden + # when found in channel configurations. + addressOverrides: + # - from: + # to: + # caCertsFile: + # - from: + # to: + # caCertsFile: + + # Type for the local MSP - by default it's of type bccsp + localMspType: bccsp + + # Used with Go profiling tools only in none production environment. In + # production, it should be disabled (eg enabled: false) + profile: + enabled: false + listenAddress: 0.0.0.0:6060 + + # Handlers defines custom handlers that can filter and mutate + # objects passing within the peer, such as: + # Auth filter - reject or forward proposals from clients + # Decorators - append or mutate the chaincode input passed to the chaincode + # Endorsers - Custom signing over proposal response payload and its mutation + # Valid handler definition contains: + # - A name which is a factory method name defined in + # core/handlers/library/library.go for statically compiled handlers + # - library path to shared object binary for pluggable filters + # Auth filters and decorators are chained and executed in the order that + # they are defined. For example: + # authFilters: + # - + # name: FilterOne + # library: /opt/lib/filter.so + # - + # name: FilterTwo + # decorators: + # - + # name: DecoratorOne + # - + # name: DecoratorTwo + # library: /opt/lib/decorator.so + # Endorsers are configured as a map that its keys are the endorsement system chaincodes that are being overridden. + # Below is an example that overrides the default ESCC and uses an endorsement plugin that has the same functionality + # as the default ESCC. + # If the 'library' property is missing, the name is used as the constructor method in the builtin library similar + # to auth filters and decorators. + # endorsers: + # escc: + # name: DefaultESCC + # library: /etc/hyperledger/fabric/plugin/escc.so + handlers: + authFilters: + - + name: DefaultAuth + - + name: ExpirationCheck # This filter checks identity x509 certificate expiration + decorators: + - + name: DefaultDecorator + endorsers: + escc: + name: DefaultEndorsement + library: + validators: + vscc: + name: DefaultValidation + library: + + # library: /etc/hyperledger/fabric/plugin/escc.so + # Number of goroutines that will execute transaction validation in parallel. + # By default, the peer chooses the number of CPUs on the machine. Set this + # variable to override that choice. + # NOTE: overriding this value might negatively influence the performance of + # the peer so please change this value only if you know what you're doing + validatorPoolSize: + + # The discovery service is used by clients to query information about peers, + # such as - which peers have joined a certain channel, what is the latest + # channel config, and most importantly - given a chaincode and a channel, + # what possible sets of peers satisfy the endorsement policy. + discovery: + enabled: true + # Whether the authentication cache is enabled or not. 
+ authCacheEnabled: true + # The maximum size of the cache, after which a purge takes place + authCacheMaxSize: 1000 + # The proportion (0 to 1) of entries that remain in the cache after the cache is purged due to overpopulation + authCachePurgeRetentionRatio: 0.75 + # Whether to allow non-admins to perform non channel scoped queries. + # When this is false, it means that only peer admins can perform non channel scoped queries. + orgMembersAllowedAccess: false + + # Limits is used to configure some internal resource limits. + limits: + # Concurrency limits the number of concurrently running requests to a service on each peer. + # Currently this option is only applied to endorser service and deliver service. + # When the property is missing or the value is 0, the concurrency limit is disabled for the service. + concurrency: + # endorserService limits concurrent requests to endorser service that handles chaincode deployment, query and invocation, + # including both user chaincodes and system chaincodes. + endorserService: 2500 + # deliverService limits concurrent event listeners registered to deliver service for blocks and transaction events. + deliverService: 2500 + # gatewayService limits concurrent requests to gateway service that handles the submission and evaluation of transactions. + gatewayService: 500 + +############################################################################### +# +# VM section +# +############################################################################### +vm: + + # Endpoint of the vm management system. For docker can be one of the following in general + # unix:///var/run/docker.sock + # http://localhost:2375 + # https://localhost:2376 + # + # For the chaincode as a service external builders, this attribute must be _removed_, not set as a nil value, + # for the peer to avoid a launch time detection of the docker daemon on the local host. + # + # ibp changes this. + # endpoint: + + + # settings for docker vms + docker: + tls: + enabled: false + ca: + file: docker/ca.crt + cert: + file: docker/tls.crt + key: + file: docker/tls.key + + # Enables/disables the standard out/err from chaincode containers for + # debugging purposes + attachStdout: false + + # Parameters on creating docker container. + # Container may be efficiently created using ipam & dns-server for cluster + # NetworkMode - sets the networking mode for the container. Supported + # standard values are: `host`(default),`bridge`,`ipvlan`,`none`. + # Dns - a list of DNS servers for the container to use. + # Note: `Privileged` `Binds` `Links` and `PortBindings` properties of + # Docker Host Config are not supported and will not be used if set. + # LogConfig - sets the logging driver (Type) and related options + # (Config) for Docker. For more info, + # https://docs.docker.com/engine/admin/logging/overview/ + # Note: Set LogConfig using Environment Variables is not supported. + hostConfig: + NetworkMode: host + Dns: + # - 192.168.0.1 + # NEVER UNCOMMENT THIS + # LogConfig: + # Type: json-file + # Config: + # max-size: "50m" + # max-file: "5" + Memory: 2147483648 + +############################################################################### +# +# Chaincode section +# +############################################################################### +chaincode: + + # The id is used by the Chaincode stub to register the executing Chaincode + # ID with the Peer and is generally supplied through ENV variables + # the `path` form of ID is provided when installing the chaincode. 
+ # The `name` is used for all other requests and can be any string. + id: + path: + name: + + # Generic builder environment, suitable for most chaincode types + builder: $(DOCKER_NS)/fabric-ccenv:$(TWO_DIGIT_VERSION) + + # Enables/disables force pulling of the base docker images (listed below) + # during user chaincode instantiation. + # Useful when using moving image tags (such as :latest) + pull: false + + golang: + # golang will never need more than baseos + runtime: $(DOCKER_NS)/fabric-baseos:$(TWO_DIGIT_VERSION) + + # whether or not golang chaincode should be linked dynamically + dynamicLink: false + + java: + # This is an image based on java:openjdk-8 with addition compiler + # tools added for java shim layer packaging. + # This image is packed with shim layer libraries that are necessary + # for Java chaincode runtime. + runtime: $(DOCKER_NS)/fabric-javaenv:$(TWO_DIGIT_VERSION) + + node: + # This is an image based on node:$(NODE_VER)-alpine + runtime: $(DOCKER_NS)/fabric-nodeenv:$(TWO_DIGIT_VERSION) + + # List of directories to treat as external builders and launchers for + # chaincode. The external builder detection processing will iterate over the + # builders in the order specified below. + # ibp updates this with ibp related values + externalBuilders: + + # Default builder for "k8s" chaincode packages. + # See https://github.com/hyperledgendary/fabric-builder-k8s + - name: k8s_builder + path: /opt/hyperledger/k8s_builder + propagateEnvironment: + - CORE_PEER_ID + - KUBERNETES_SERVICE_HOST + - KUBERNETES_SERVICE_PORT + + # Default builder for chaincode-as-a-service, included in fabric + # opensource versions >= 2.4.2. This is a "no-op" builder and will not + # manage the lifecycle of pods, deployments, and services in k8s. The + # builder will only copy the chaincode package metadata, instructing the + # peer to connect to a remote CCaaS endpoint at a given service URL. + - name: ccaas-builder + path: /opt/hyperledger/ccaas_builder + propagateEnvironment: + - CHAINCODE_AS_A_SERVICE_BUILDER_CONFIG + + # The maximum duration to wait for the chaincode build and install process + # to complete. + installTimeout: 300s + + # Timeout duration for starting up a container and waiting for Register + # to come through. + startuptimeout: 300s + + # Timeout duration for Invoke and Init calls to prevent runaway. + # This timeout is used by all chaincodes in all the channels, including + # system chaincodes. + # Note that during Invoke, if the image is not available (e.g. being + # cleaned up when in development environment), the peer will automatically + # build the image, which might take more time. In production environment, + # the chaincode image is unlikely to be deleted, so the timeout could be + # reduced accordingly. + # ibp updates this from 30s to 60s + executetimeout: 60s + + # There are 2 modes: "dev" and "net". + # In dev mode, user runs the chaincode after starting peer from + # command line on local machine. + # In net mode, peer will run chaincode in a docker container. + mode: net + + # keepalive in seconds. In situations where the communication goes through a + # proxy that does not support keep-alive, this parameter will maintain connection + # between peer and chaincode. 
+ # A value <= 0 turns keepalive off + keepalive: 0 + + # enabled system chaincodes + system: + _lifecycle: enable + cscc: enable + lscc: enable + qscc: enable + + # Logging section for the chaincode container + logging: + # Default level for all loggers within the chaincode container + level: info + # Override default level for the 'shim' logger + shim: warning + # Format for the chaincode container logs + format: '%{color}%{time:2006-01-02 15:04:05.000 MST} [%{module}] %{shortfunc} -> %{level:.4s} %{id:03x}%{color:reset} %{message}' + +############################################################################### +# +# Ledger section - ledger configuration encompasses both the blockchain +# and the state +# +############################################################################### +ledger: + + blockchain: + + state: + # stateDatabase - options are "goleveldb", "CouchDB" + # goleveldb - default state database stored in goleveldb. + # CouchDB - store state database in CouchDB + stateDatabase: goleveldb + # Limit on the number of records to return per query + totalQueryLimit: 100000 + couchDBConfig: + # It is recommended to run CouchDB on the same server as the peer, and + # not map the CouchDB container port to a server port in docker-compose. + # Otherwise proper security must be provided on the connection between + # CouchDB client (on the peer) and server. + couchDBAddress: 127.0.0.1:5984 + # This username must have read and write authority on CouchDB + username: + # The password is recommended to pass as an environment variable + # during start up (eg CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD). + # If it is stored here, the file must be access control protected + # to prevent unintended users from discovering the password. + password: + # Number of retries for CouchDB errors + maxRetries: 3 + # Number of retries for CouchDB errors during peer startup. + # The delay between retries doubles for each attempt. + # Default of 10 retries results in 11 attempts over 2 minutes. + maxRetriesOnStartup: 10 + # CouchDB request timeout (unit: duration, e.g. 20s) + requestTimeout: 35s + # Limit on the number of records per each CouchDB query + # Note that chaincode queries are only bound by totalQueryLimit. + # Internally the chaincode may execute multiple CouchDB queries, + # each of size internalQueryLimit. + internalQueryLimit: 1000 + # Limit on the number of records per CouchDB bulk update batch + maxBatchUpdateSize: 1000 + # Warm indexes after every N blocks. + # This option warms any indexes that have been + # deployed to CouchDB after every N blocks. + # A value of 1 will warm indexes after every block commit, + # to ensure fast selector queries. + # Increasing the value may improve write efficiency of peer and CouchDB, + # but may degrade query response time. + warmIndexesAfterNBlocks: 1 + # Create the _global_changes system database + # This is optional. Creating the global changes database will require + # additional system resources to track changes and maintain the database + createGlobalChangesDB: false + # CacheSize denotes the maximum mega bytes (MB) to be allocated for the in-memory state + # cache. Note that CacheSize needs to be a multiple of 32 MB. If it is not a multiple + # of 32 MB, the peer would round the size to the next multiple of 32 MB. + # To disable the cache, 0 MB needs to be assigned to the cacheSize. + cacheSize: 64 + + history: + # enableHistoryDatabase - options are true or false + # Indicates if the history of key updates should be stored. 
+ # The history 'index' is always stored in goleveldb, regardless of whether + # CouchDB or an alternate database is used for the state. + enableHistoryDatabase: true + + pvtdataStore: + # The maximum db batch size for converting + # ineligible missing data entries to eligible missing data entries + collElgProcMaxDbBatchSize: 5000 + # The minimum duration (in milliseconds) between writing + # two consecutive db batches for converting ineligible missing data entries to eligible missing data entries + collElgProcDbBatchesInterval: 1000 + # The missing data entries are classified into two categories: + # (1) prioritized + # (2) deprioritized + # Initially, all missing data are in the prioritized list. When the + # reconciler is unable to fetch the missing data from other peers, + # the unreconciled missing data is moved to the deprioritized list. + # The reconciler retries deprioritized missing data after every + # deprioritizedDataReconcilerInterval (unit: minutes). Note that the + # interval needs to be greater than the reconcileSleepInterval. + deprioritizedDataReconcilerInterval: 60m + + snapshots: + # Path on the file system where the peer will store ledger snapshots + rootDir: /var/hyperledger/production/snapshots + +############################################################################### +# +# Operations section +# +############################################################################### +operations: + # Host and port for the operations server + listenAddress: 127.0.0.1:9443 + + # TLS configuration for the operations endpoint + tls: + # TLS enabled + enabled: false + + # Path to the PEM encoded server certificate for the operations server + cert: + file: + + # Path to the PEM encoded server key for the operations server + key: + file: + + # Most operations service endpoints require client authentication when TLS + # is enabled. clientAuthRequired requires client certificate authentication + # at the TLS layer to access all resources. 
+ clientAuthRequired: false + + # paths to PEM encoded ca certificates to trust for client authentication + clientRootCAs: + files: [] + +############################################################################### +# +# Metrics section +# +############################################################################### +metrics: + # metrics provider is one of statsd, prometheus, or disabled + # ibp updates this from disabled to prometheus + provider: prometheus + + # statsd configuration + statsd: + # network type: tcp or udp + network: udp + + # statsd server address + address: 127.0.0.1:8125 + + # the interval at which locally cached counters and gauges are pushed + # to statsd; timings are pushed immediately + writeInterval: 10s + + # prefix is prepended to all emitted statsd metrics + prefix: diff --git a/definitions/ca/deployment.yaml b/definitions/ca/deployment.yaml new file mode 100644 index 00000000..b155d175 --- /dev/null +++ b/definitions/ca/deployment.yaml @@ -0,0 +1,139 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ibpca-deployment +spec: + replicas: 1 + selector: {} + strategy: + type: Recreate + template: + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + containers: + - command: + - sh + - -c + - mkdir -p /data/tlsca && cp /config/tlsca/fabric-ca-server-config.yaml /data/tlsca + && mkdir -p /data/ca && cp /config/ca/fabric-ca-server-config.yaml /data/ca + && fabric-ca-server start --home /data/ca + env: + - name: LICENSE + value: accept + - name: FABRIC_CA_HOME + value: /data/ca + - name: SERVICE_HOST + value: ca + - name: FABRIC_CA_SERVER_OPERATIONS_TLS_CERT_FILE + value: /crypto/ca/operations-cert.pem + - name: FABRIC_CA_SERVER_OPERATIONS_TLS_KEY_FILE + value: /crypto/ca/operations-key.pem + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + image: "" + imagePullPolicy: Always + livenessProbe: + failureThreshold: 6 + httpGet: + path: /healthz + port: operations + scheme: HTTPS + initialDelaySeconds: 30 + timeoutSeconds: 5 + name: ca + ports: + - containerPort: 7054 + name: ca + - containerPort: 9443 + name: operations + readinessProbe: + httpGet: + path: /healthz + port: operations + scheme: HTTPS + initialDelaySeconds: 26 + periodSeconds: 5 + timeoutSeconds: 5 + resources: + limits: + cpu: 2000m + ephemeral-storage: 1G + memory: 4Gi + requests: + cpu: 100m + ephemeral-storage: 100M + memory: 100Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + privileged: false + readOnlyRootFilesystem: false + runAsNonRoot: true + runAsUser: 7051 + volumeMounts: + - mountPath: /crypto/ca + name: ca-crypto + - mountPath: /crypto/tlsca + name: tlsca-crypto + - mountPath: /config/ca + name: ca-config + - mountPath: /config/tlsca + name: tlsca-config + hostIPC: false + hostNetwork: false + hostPID: false + initContainers: + - command: + - sh + - -c + - mkdir -p /data/db && chmod -R 775 /data/ && chown -R -H 7051:7051 /data/ + env: + - name: LICENSE + value: accept + image: "" + imagePullPolicy: Always + name: init + resources: + limits: + cpu: 200m + ephemeral-storage: 1G + memory: 400M + requests: + cpu: 200m + ephemeral-storage: 100M + memory: 400M + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - CHOWN + - FOWNER + drop: + - ALL + privileged: false + readOnlyRootFilesystem: false + runAsNonRoot: false + runAsUser: 0 
+ securityContext: + fsGroup: 7051 + runAsNonRoot: true + runAsUser: 7051 + serviceAccountName: sample + volumes: + - emptyDir: + medium: Memory + name: shared diff --git a/definitions/ca/ingress.yaml b/definitions/ca/ingress.yaml new file mode 100644 index 00000000..997f4f6c --- /dev/null +++ b/definitions/ca/ingress.yaml @@ -0,0 +1,26 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: ingress-ibpca + annotations: + nginx.ingress.kubernetes.io/ssl-passthrough: "true" + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/proxy-connect-timeout: 60s diff --git a/definitions/ca/ingressv1beta1.yaml b/definitions/ca/ingressv1beta1.yaml new file mode 100644 index 00000000..dd9bc319 --- /dev/null +++ b/definitions/ca/ingressv1beta1.yaml @@ -0,0 +1,26 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: ingress-ibpca + annotations: + nginx.ingress.kubernetes.io/ssl-passthrough: "true" + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/proxy-connect-timeout: 60s \ No newline at end of file diff --git a/definitions/ca/pvc.yaml b/definitions/ca/pvc.yaml new file mode 100644 index 00000000..96227167 --- /dev/null +++ b/definitions/ca/pvc.yaml @@ -0,0 +1,28 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: "ibpca-pvc" +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "100Mi" diff --git a/definitions/ca/role.yaml b/definitions/ca/role.yaml new file mode 100644 index 00000000..b2b86dff --- /dev/null +++ b/definitions/ca/role.yaml @@ -0,0 +1,37 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: "ca-role" +rules: + - apiGroups: + - "" + resources: + - configmaps + - secrets + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection diff --git a/definitions/ca/rolebinding.yaml b/definitions/ca/rolebinding.yaml new file mode 100644 index 00000000..d9105eb0 --- /dev/null +++ b/definitions/ca/rolebinding.yaml @@ -0,0 +1,29 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: "ca-rb" +subjects: + - kind: ServiceAccount + name: sample +roleRef: + kind: Role + name: sample + apiGroup: rbac.authorization.k8s.io diff --git a/definitions/ca/route.yaml b/definitions/ca/route.yaml new file mode 100644 index 00000000..a7328266 --- /dev/null +++ b/definitions/ca/route.yaml @@ -0,0 +1,33 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + name: ibpca-route +spec: + host: ibpca.ipaddress.nip.io + port: + targetPort: http + tls: + termination: passthrough + to: + kind: Service + name: ibpca-service + weight: 100 + wildcardPolicy: None diff --git a/definitions/ca/service.yaml b/definitions/ca/service.yaml new file mode 100644 index 00000000..922ba287 --- /dev/null +++ b/definitions/ca/service.yaml @@ -0,0 +1,35 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: v1 +kind: Service +metadata: + name: ibpca-service +spec: + type: ClusterIP + selector: + release: "ibp-ca" + ports: + - port: 7054 + targetPort: 7054 + protocol: TCP + name: http + - port: 9443 + targetPort: 9443 + protocol: TCP + name: operations diff --git a/definitions/ca/serviceaccount.yaml b/definitions/ca/serviceaccount.yaml new file mode 100644 index 00000000..c260316c --- /dev/null +++ b/definitions/ca/serviceaccount.yaml @@ -0,0 +1,25 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: "ca-sa" +automountServiceAccountToken: true +imagePullSecrets: + - name: ibm-entitlement-key diff --git a/definitions/console/configmap.yaml b/definitions/console/configmap.yaml new file mode 100644 index 00000000..328b7255 --- /dev/null +++ b/definitions/console/configmap.yaml @@ -0,0 +1,29 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +apiVersion: v1 +kind: ConfigMap +metadata: + name: ibpconsole-configmap +data: + REGION: prod + DB_CONNECTION_STRING: "http://localhost:5984" + APP_PORT: "3000" + CONFIGTXLATOR_URL_ORIGINAL: "http://localhost:8083" + DB_SYSTEM: athena-system + CONFIGURE_FILE: "/template/settings.yaml" diff --git a/definitions/console/console-configmap.yaml b/definitions/console/console-configmap.yaml new file mode 100644 index 00000000..d57684bb --- /dev/null +++ b/definitions/console/console-configmap.yaml @@ -0,0 +1,84 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: v1 +kind: ConfigMap +metadata: + name: ibpconsole-template-configmap +data: + settings.yaml: | + version: "v2.0" + initial_admin: "xyz@ibm.com" + auth_scheme: "couchdb" + configtxlator: "http://localhost:8083" + deployer_url: "http://dev:dev123@localhost:8080" + deployer_timeout: 60000 + db_custom_names: + DB_COMPONENTS: athena-components + DB_SESSIONS: athena-sessions + DB_SYSTEM: athena-system + enforce_backend_ssl: false + system_channel_id: 'testchainid' + dynamic_tls: false + dynamic_config: true + zone: prod + infrastructure: openshift + fabric_capabilities: + application: + - "V1_1" + - "V1_2" + - "V1_3" + - "V1_4_2" + - "V2_0" + channel: + - "V1_3" + - "V1_4_2" + - "V1_4_3" + - "V2_0" + orderer: + - "V1_1" + - "V1_4_2" + - "V2_0" + cluster_data: + # type: 'paid' + # zones: + # - singleZone + feature_flags: + import_only_enabled: false + read_only_enabled: false + create_channel_enabled: true + remote_peer_config_enabled: true + saas_enabled: true + mustgather_enabled: true + templates_enabled: false + capabilities_enabled: true + high_availability: true + enable_ou_identifier: true + infra_import_options: + platform: openshift + supported_cas: + - openshift + - ibmcloud + supported_orderers: + - openshift + - ibmcloud + supported_peers: + - openshift + - ibmcloud + #{{- if .Values.app.extraConfig }} + #{{ toYaml .Values.app.extraConfig | indent 4 }} diff --git a/definitions/console/deployer-configmap.yaml b/definitions/console/deployer-configmap.yaml new file mode 100644 index 00000000..cc61238b --- /dev/null +++ b/definitions/console/deployer-configmap.yaml @@ -0,0 +1,197 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +apiVersion: v1 +kind: ConfigMap +metadata: + name: ibpconsole-deployer-template +data: + settings.yaml: | + db: + connectionurl: "http://localhost:5984" + createdb: true + components: + name: "components" + designdocs: + - ./designdocs/components/service_broker.json + - ./designdocs/components/deployer.json + - ./designdocs/components/plutus.json + - ./designdocs/components/search_indices.json + port: 8080 + loglevel: debug + tls: + enabled: false + certpath: /certs/tls.crt + keypath: /certs/tls.key + auth: + username: dev + password: dev123 + + imagePullSecret: "" + usetags: false + versions: + ca: + 1.5.3-1: + default: true + version: 1.5.3-1 + image: + caInitImage: registry.access.redhat.com/ubi8/ubi-minimal + caInitTag: latest + caImage: hyperledger/fabric-ca + caTag: 1.5.3 + + peer: + 2.4.3-1: + default: true + version: 2.4.3-1 + image: + peerInitImage: registry.access.redhat.com/ubi8/ubi-minimal + peerInitTag: latest + peerImage: hyperledger/fabric-peer + peerTag: 2.4.3 + couchdbImage: couchdb + couchdbTag: 3.1.2 + grpcwebImage: ghcr.io/hyperledger-labs/grpc-web + grpcwebTag: latest + orderer: + 2.4.3-1: + default: true + version: 2.4.3-1 + image: + ordererInitImage: registry.access.redhat.com/ubi8/ubi-minimal + ordererInitTag: latest + ordererImage: hyperledger/fabric-orderer + ordererTag: 2.4.3 + grpcwebImage: ghcr.io/hyperledger-labs/grpc-web + grpcwebTag: latest + + defaults: + storage: + ca: + ca: + size: 1Gi + class: "" + peer: + statedb: + size: 10Gi + class: "" + peer: + size: 10Gi + class: "" + orderer: + orderer: + size: 10Gi + class: "" + resources: + ca: + ca: + limits: + cpu: 100m + memory: 200M + ephemeral-storage: 1G + requests: + cpu: 100m + memory: 200M + ephemeral-storage: 100M + init: + limits: + cpu: 100m + memory: 200M + ephemeral-storage: 1G + requests: + cpu: 100m + memory: 200M + ephemeral-storage: 100M + peer: + peer: + limits: + cpu: 200m + memory: 400M + requests: + cpu: 200m + memory: 400M + fluentd: + limits: + cpu: 100m + memory: 200M + requests: + cpu: 100m + memory: 200M + couchdb: + limits: + cpu: 200m + memory: 400M + requests: + cpu: 200m + memory: 400M + dind: + limits: + cpu: 500m + memory: 1000M + requests: + cpu: 500m + memory: 1000M + proxy: + limits: + cpu: 100m + memory: 200M + requests: + cpu: 100m + memory: 200M + init: + limits: + cpu: 100m + memory: 200M + requests: + cpu: 100m + memory: 200M + chaincodelauncher: + limits: + cpu: 200m + memory: 400M + requests: + cpu: 200m + memory: 400M + orderer: + orderer: + limits: + cpu: 250m + memory: 500M + ephemeral-storage: 1G + requests: + cpu: 250m + memory: 500M + ephemeral-storage: 100M + proxy: + limits: + cpu: 100m + memory: 200M + ephemeral-storage: 1G + requests: + cpu: 100m + memory: 200M + ephemeral-storage: 100M + init: + limits: + cpu: 100m + memory: 200M + ephemeral-storage: 1G + requests: + cpu: 100m + memory: 200M + ephemeral-storage: 100M diff --git a/definitions/console/deployer-service.yaml b/definitions/console/deployer-service.yaml new file mode 100644 index 00000000..6c0a591e --- /dev/null +++ b/definitions/console/deployer-service.yaml @@ -0,0 +1,31 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: v1 +kind: Service +metadata: + name: "ibpconsole-deployer-service" +spec: + type: ClusterIP + ports: + - name: deployer + port: 8080 + targetPort: 8080 + protocol: TCP + selector: + deployment: "console" diff --git a/definitions/console/deployment.yaml b/definitions/console/deployment.yaml new file mode 100644 index 00000000..7bfdada6 --- /dev/null +++ b/definitions/console/deployment.yaml @@ -0,0 +1,204 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ibpconsole-deployment +spec: + selector: {} + template: + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + containers: + - env: + - name: LICENSE + value: accept + image: "" + imagePullPolicy: Always + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - curl -X POST -k https://localhost:3000/api/v3/requests/stop + livenessProbe: + failureThreshold: 5 + httpGet: + path: /api/v3/healthcheck + port: optools + scheme: HTTPS + initialDelaySeconds: 60 + timeoutSeconds: 5 + name: optools + ports: + - containerPort: 3000 + name: optools + protocol: TCP + - containerPort: 3001 + name: proxy + protocol: TCP + readinessProbe: + httpGet: + path: /api/v3/healthcheck + port: optools + scheme: HTTPS + initialDelaySeconds: 55 + periodSeconds: 5 + timeoutSeconds: 5 + resources: + limits: + cpu: 500m + ephemeral-storage: 1G + memory: 1000Mi + requests: + cpu: 500m + ephemeral-storage: 100M + memory: 1000Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + privileged: false + readOnlyRootFilesystem: false + runAsNonRoot: true + runAsUser: 1000 + volumeMounts: + - mountPath: /template/ + name: template + - env: + - name: LICENSE + value: accept + - name: CONFIGPATH + value: /deployer/settings.yaml + image: "" + imagePullPolicy: Always + livenessProbe: + failureThreshold: 5 + initialDelaySeconds: 16 + tcpSocket: + port: 8080 + timeoutSeconds: 5 + name: deployer + ports: + - containerPort: 8080 + name: api + protocol: TCP + readinessProbe: + initialDelaySeconds: 10 + periodSeconds: 5 + tcpSocket: + port: 8080 + timeoutSeconds: 5 + resources: + limits: + cpu: 100m + ephemeral-storage: 1G + memory: 200Mi + requests: + cpu: 100m + ephemeral-storage: 100M + memory: 200Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + privileged: false + readOnlyRootFilesystem: false + runAsNonRoot: true + runAsUser: 1000 + volumeMounts: + - mountPath: /deployer/ + name: deployer-template + - command: + - sh + - -c + - configtxlator start --port=8083 --CORS=* + env: + - name: LICENSE + value: accept + image: "" + imagePullPolicy: Always + livenessProbe: + failureThreshold: 5 + initialDelaySeconds: 16 + tcpSocket: + port: 8083 + timeoutSeconds: 5 + name: configtxlator + ports: + - containerPort: 8083 + name: configtxlator + protocol: TCP + readinessProbe: + initialDelaySeconds: 10 + periodSeconds: 5 + tcpSocket: + port: 8083 + timeoutSeconds: 5 + resources: + 
limits: + cpu: 25m + ephemeral-storage: 1G + memory: 50Mi + requests: + cpu: 25m + ephemeral-storage: 100M + memory: 50Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + privileged: false + readOnlyRootFilesystem: false + hostIPC: false + hostNetwork: false + hostPID: false + initContainers: + - env: + - name: LICENSE + value: accept + image: "" + imagePullPolicy: Always + name: init + resources: + limits: + cpu: 200m + ephemeral-storage: 1G + memory: 400M + requests: + cpu: 200m + ephemeral-storage: 100M + memory: 400M + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - CHOWN + - FOWNER + drop: + - ALL + privileged: false + readOnlyRootFilesystem: false + runAsNonRoot: false + runAsUser: 0 + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 1000 + serviceAccountName: sample diff --git a/definitions/console/ingress.yaml b/definitions/console/ingress.yaml new file mode 100644 index 00000000..f96cf641 --- /dev/null +++ b/definitions/console/ingress.yaml @@ -0,0 +1,26 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: ingress-peer + annotations: + nginx.ingress.kubernetes.io/ssl-passthrough: "true" + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/proxy-connect-timeout: 60s diff --git a/definitions/console/ingressv1beta1.yaml b/definitions/console/ingressv1beta1.yaml new file mode 100644 index 00000000..38391848 --- /dev/null +++ b/definitions/console/ingressv1beta1.yaml @@ -0,0 +1,26 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: ingress-peer + annotations: + nginx.ingress.kubernetes.io/ssl-passthrough: "true" + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/proxy-connect-timeout: 60s \ No newline at end of file diff --git a/definitions/console/networkpolicy-denyall.yaml b/definitions/console/networkpolicy-denyall.yaml new file mode 100644 index 00000000..e60d6c94 --- /dev/null +++ b/definitions/console/networkpolicy-denyall.yaml @@ -0,0 +1,25 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: networkpolicy-denyall +spec: + podSelector: {} + ingress: [] diff --git a/definitions/console/networkpolicy-ingress.yaml b/definitions/console/networkpolicy-ingress.yaml new file mode 100644 index 00000000..3f9025a0 --- /dev/null +++ b/definitions/console/networkpolicy-ingress.yaml @@ -0,0 +1,54 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: networkpolicy-ingress +spec: + ingress: + - from: [] # everywhere + ports: + - port: 7051 # peer-api + protocol: TCP + - port: 9443 # peer-operations / ca-operations + protocol: TCP + - port: 7443 # peer-grpcweb / orderer-grpcweb + protocol: TCP + - port: 7052 + protocol: TCP # peer-chaincode + - port: 3000 # optools + protocol: TCP + - port: 7050 # orderer-grpc + protocol: TCP + - port: 8443 # orderer-operations + protocol: TCP + - port: 22222 # fileserver #check install/invoke chaincode + protocol: TCP + - port: 11111 # grpc #check install/invoke chaincode + protocol: TCP + - port: 7054 # ca + protocol: TCP + # - port: 443 # tcp + # protocol: TCP + # - port: 8080 # tcp + # protocol: TCP + podSelector: + matchLabels: {} + policyTypes: + - Ingress diff --git a/definitions/console/pvc.yaml b/definitions/console/pvc.yaml new file mode 100644 index 00000000..6645164b --- /dev/null +++ b/definitions/console/pvc.yaml @@ -0,0 +1,28 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: ibpconsole-pvc +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "10Gi" diff --git a/definitions/console/role.yaml b/definitions/console/role.yaml new file mode 100644 index 00000000..18d307b4 --- /dev/null +++ b/definitions/console/role.yaml @@ -0,0 +1,79 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: "console-role" +rules: + - apiGroups: + - "" + resources: + - secrets + - services + - configmaps + - pods + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection + - apiGroups: + - ibp.com + resources: + - ibpcas + - ibppeers + - ibporderers + - ibpconsoles + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection + - apiGroups: + - "" + - "apps" + - "extensions" + - "networking.k8s.io" + resources: + - pods/log + - persistentvolumeclaims + - deployments + - replicas + - ingresses + verbs: + - "get" + - "list" + - "watch" + - apiGroups: + - "" + - "route.openshift.io" + resources: + - "routes" + verbs: + - "get" + - "list" + - "watch" diff --git a/definitions/console/rolebinding.yaml b/definitions/console/rolebinding.yaml new file mode 100644 index 00000000..115d754f --- /dev/null +++ b/definitions/console/rolebinding.yaml @@ -0,0 +1,29 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: "console-rb" +subjects: + - kind: ServiceAccount + name: sample +roleRef: + kind: Role + name: sample + apiGroup: rbac.authorization.k8s.io diff --git a/definitions/console/route.yaml b/definitions/console/route.yaml new file mode 100644 index 00000000..845efdc1 --- /dev/null +++ b/definitions/console/route.yaml @@ -0,0 +1,33 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + name: ibpconsole-route +spec: + host: ibpconsole.ipaddress.nip.io + port: + targetPort: optools + tls: + termination: passthrough + to: + kind: Service + name: ibpconsole-service + weight: 100 + wildcardPolicy: None diff --git a/definitions/console/service.yaml b/definitions/console/service.yaml new file mode 100644 index 00000000..a7a79612 --- /dev/null +++ b/definitions/console/service.yaml @@ -0,0 +1,31 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: v1 +kind: Service +metadata: + name: "ibpconsole-service" +spec: + type: ClusterIP + ports: + - name: optools + port: 3000 + targetPort: 3000 + protocol: TCP + selector: + deployment: "console" diff --git a/definitions/console/serviceaccount.yaml b/definitions/console/serviceaccount.yaml new file mode 100644 index 00000000..74c11e10 --- /dev/null +++ b/definitions/console/serviceaccount.yaml @@ -0,0 +1,25 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: "console-sa" +automountServiceAccountToken: true +imagePullSecrets: + - name: ibm-entitlement-key diff --git a/definitions/orderer/configmap.yaml b/definitions/orderer/configmap.yaml new file mode 100644 index 00000000..d442450e --- /dev/null +++ b/definitions/orderer/configmap.yaml @@ -0,0 +1,45 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +apiVersion: v1 +kind: ConfigMap +metadata: + name: "orderer-cm" +data: + ORDERER_GENERAL_LISTENADDRESS: "0.0.0.0" + ORDERER_GENERAL_LISTENPORT: "7050" + ORDERER_GENERAL_LEDGERTYPE: "file" + ORDERER_FILELEDGER_LOCATION: "/ordererdata/ledger/ibporderer" + ORDERER_GENERAL_TLS_ENABLED: "true" + ORDERER_GENERAL_LOCALMSPDIR: "/certs/msp" + ORDERER_GENERAL_TLS_PRIVATEKEY: "/certs/tls/keystore/key.pem" + ORDERER_GENERAL_TLS_CERTIFICATE: "/certs/tls/signcerts/cert.pem" + ORDERER_GENERAL_TLS_ROOTCAS: "/certs/msp/tlscacerts/cacert-0.pem" + # operations + ORDERER_OPERATIONS_LISTENADDRESS: "0.0.0.0:8443" + ORDERER_OPERATIONS_TLS_ENABLED: "true" + ORDERER_OPERATIONS_TLS_CERTIFICATE: "/certs/tls/signcerts/cert.pem" + ORDERER_OPERATIONS_TLS_PRIVATEKEY: "/certs/tls/keystore/key.pem" + ORDERER_OPERATIONS_TLS_CLIENTAUTHREQUIRED: "false" + ORDERER_OPERATIONS_TLS_ROOTCAS: "/certs/msp/tlscacerts/cacert-0.pem" + ORDERER_OPERATIONS_TLS_CLIENTROOTCAS: "/certs/msp/tlscacerts/cacert-0.pem" + # raft + ORDERER_CONSENSUS_WALDIR: "/ordererdata/raft/ibporderer/wal" + ORDERER_CONSENSUS_SNAPDIR: "/ordererdata/raft/ibporderer/snapshot" + ORDERER_GENERAL_CLUSTER_CLIENTCERTIFICATE: "/certs/tls/signcerts/cert.pem" + ORDERER_GENERAL_CLUSTER_CLIENTPRIVATEKEY: "/certs/tls/keystore/key.pem" + ORDERER_GENERAL_CLUSTER_ROOTCAS: "/certs/msp/tlscacerts/cacert-0.pem" diff --git a/definitions/orderer/deployment.yaml b/definitions/orderer/deployment.yaml new file mode 100644 index 00000000..8d4b5b55 --- /dev/null +++ b/definitions/orderer/deployment.yaml @@ -0,0 +1,226 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ibporderer-deployment +spec: + replicas: 1 + selector: {} + strategy: + type: Recreate + template: + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: orgname + operator: In + values: + - "" + topologyKey: kubernetes.io/hostname + weight: 100 + containers: + - env: + - name: LICENSE + value: accept + - name: FABRIC_CFG_PATH + value: /certs/ + image: "" + imagePullPolicy: Always + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: operations + scheme: HTTPS + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + name: orderer + ports: + - containerPort: 7050 + name: orderer + - containerPort: 
8443 + name: operations + - containerPort: 9443 + name: orderer-admin + readinessProbe: + failureThreshold: 30 + httpGet: + path: /healthz + port: operations + scheme: HTTPS + initialDelaySeconds: 26 + periodSeconds: 10 + resources: + limits: + cpu: 2000m + ephemeral-storage: 1G + memory: 4Gi + requests: + cpu: 100m + ephemeral-storage: 100M + memory: 100Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + privileged: false + readOnlyRootFilesystem: false + runAsNonRoot: true + runAsUser: 7051 + startupProbe: + failureThreshold: 30 + httpGet: + path: /healthz + port: operations + scheme: HTTPS + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 5 + volumeMounts: + - mountPath: /ordererdata + name: orderer-data + subPath: data + - mountPath: /certs/msp/cacerts + name: ecert-cacerts + - mountPath: /certs/msp/signcerts + name: ecert-signcert + - mountPath: /certs/msp/tlscacerts + name: tls-cacerts + - mountPath: /certs/tls/keystore + name: tls-keystore + - mountPath: /certs/tls/signcerts + name: tls-signcert + - mountPath: /certs + name: orderer-config + - mountPath: /certs/msp + name: orderer-config + - env: + - name: LICENSE + value: accept + - name: BACKEND_ADDRESS + value: 127.0.0.1:7050 + - name: SERVER_TLS_CERT_FILE + value: /certs/tls/signcerts/cert.pem + - name: SERVER_TLS_KEY_FILE + value: /certs/tls/keystore/key.pem + - name: SERVER_TLS_CLIENT_CA_FILES + value: /certs/msp/tlscacerts/cacert-0.pem + - name: SERVER_BIND_ADDRESS + value: 0.0.0.0 + - name: SERVER_HTTP_DEBUG_PORT + value: "8080" + - name: SERVER_HTTP_TLS_PORT + value: "7443" + - name: BACKEND_TLS + value: "true" + - name: SERVER_HTTP_MAX_WRITE_TIMEOUT + value: 5m + - name: SERVER_HTTP_MAX_READ_TIMEOUT + value: 5m + - name: USE_WEBSOCKETS + value: "true" + image: "" + imagePullPolicy: Always + livenessProbe: + failureThreshold: 6 + tcpSocket: + port: 7443 + initialDelaySeconds: 30 + timeoutSeconds: 5 + name: proxy + ports: + - containerPort: 8080 + name: http + - containerPort: 7443 + name: https + readinessProbe: + tcpSocket: + port: 7443 + initialDelaySeconds: 26 + periodSeconds: 5 + timeoutSeconds: 5 + resources: + limits: + cpu: 2000m + ephemeral-storage: 1G + memory: 4Gi + requests: + cpu: 100m + ephemeral-storage: 100M + memory: 100Mi + securityContext: + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + privileged: false + readOnlyRootFilesystem: false + runAsNonRoot: true + runAsUser: 1000 + volumeMounts: + - mountPath: /certs/msp/tlscacerts + name: tls-cacerts + - mountPath: /certs/tls/keystore + name: tls-keystore + - mountPath: /certs/tls/signcerts + name: tls-signcert + hostIPC: false + hostNetwork: false + hostPID: false + initContainers: + - command: + - sh + - -c + - chmod -R 775 /ordererdata/ && chown -R -H 7051:7051 /ordererdata/ + env: + - name: LICENSE + value: accept + image: "" + imagePullPolicy: Always + name: init + resources: + limits: + cpu: 200m + ephemeral-storage: 1G + memory: 400M + requests: + cpu: 200m + ephemeral-storage: 100M + memory: 400M + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - CHOWN + - FOWNER + drop: + - ALL + privileged: false + readOnlyRootFilesystem: false + runAsNonRoot: false + runAsUser: 0 + volumeMounts: + - mountPath: /ordererdata + name: orderer-data + subPath: data + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 1000 + serviceAccountName: sample diff --git a/definitions/orderer/ingress.yaml b/definitions/orderer/ingress.yaml new 
file mode 100644 index 00000000..7009e7cb --- /dev/null +++ b/definitions/orderer/ingress.yaml @@ -0,0 +1,25 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: ingress-orderer + annotations: + nginx.ingress.kubernetes.io/ssl-passthrough: "true" + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/proxy-connect-timeout: 60s diff --git a/definitions/orderer/ingressv1beta1.yaml b/definitions/orderer/ingressv1beta1.yaml new file mode 100644 index 00000000..5f984e51 --- /dev/null +++ b/definitions/orderer/ingressv1beta1.yaml @@ -0,0 +1,25 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: ingress-orderer + annotations: + nginx.ingress.kubernetes.io/ssl-passthrough: "true" + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/proxy-connect-timeout: 60s diff --git a/definitions/orderer/orderernode.yaml b/definitions/orderer/orderernode.yaml new file mode 100644 index 00000000..be9cca96 --- /dev/null +++ b/definitions/orderer/orderernode.yaml @@ -0,0 +1,84 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +kind: IBPOrderer +metadata: + name: + labels: +spec: + orgName: + mspID: + # DEPRECATED generateGenesis: "false" + genesisProfile: + genesisBlock: + systemChannelName: + # DEPRECATED ordererUrl: + # DEPRECATED mspSecretName: + ordererType: + # DEPRECATED externalAddress: + metrics: + image: + ordererInitImage: + ordererInitTag: + ordererImage: + ordererTag: + grpcwebImage: + grpcwebTag: + imagePullSecret: + service: + storage: + orderer: + size: + class: + resources: + init: + limits: + cpu: + memory: + ephemeral-storage: + requests: + cpu: + memory: + ephemeral-storage: + orderer: + limits: + cpu: + memory: + ephemeral-storage: + requests: + cpu: + memory: + ephemeral-storage: + proxy: + limits: + cpu: + memory: + ephemeral-storage: + requests: + cpu: + memory: + ephemeral-storage: + arch: + zone: + region: + replicas: 1 + domain: + secret: + enrollment: + msp: + clusterSize: 1 diff --git a/definitions/orderer/pvc.yaml b/definitions/orderer/pvc.yaml new file mode 100644 index 00000000..526eb9c3 --- /dev/null +++ b/definitions/orderer/pvc.yaml @@ -0,0 +1,28 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: "orderer-pvc" +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "100Mi" diff --git a/definitions/orderer/role.yaml b/definitions/orderer/role.yaml new file mode 100644 index 00000000..24872764 --- /dev/null +++ b/definitions/orderer/role.yaml @@ -0,0 +1,36 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: "orderer-role" +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection diff --git a/definitions/orderer/rolebinding.yaml b/definitions/orderer/rolebinding.yaml new file mode 100644 index 00000000..297d183c --- /dev/null +++ b/definitions/orderer/rolebinding.yaml @@ -0,0 +1,28 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: "orderer-rb" +subjects: + - kind: ServiceAccount + name: sample +roleRef: + kind: Role + name: sample + apiGroup: rbac.authorization.k8s.io diff --git a/definitions/orderer/route.yaml b/definitions/orderer/route.yaml new file mode 100644 index 00000000..bc4bbacf --- /dev/null +++ b/definitions/orderer/route.yaml @@ -0,0 +1,33 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + name: ibporderer-route +spec: + host: ibporderer.ipaddress.nip.io + port: + targetPort: http + tls: + termination: passthrough + to: + kind: Service + name: ibporderer-service + weight: 100 + wildcardPolicy: None diff --git a/definitions/orderer/saas-ingress-community.yaml b/definitions/orderer/saas-ingress-community.yaml new file mode 100644 index 00000000..02294911 --- /dev/null +++ b/definitions/orderer/saas-ingress-community.yaml @@ -0,0 +1,42 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: ingress-orderer-community + annotations: + nginx.ingress.kubernetes.io/backend-protocol: HTTPS + nginx.ingress.kubernetes.io/proxy-ssl-verify: "false" + nginx.ingress.kubernetes.io/proxy-body-size: 25m +spec: + rules: + - host: fake.host + http: + paths: + - backend: + service: + name: fake-service + port: + name: some-port + path: / + pathType: ImplementationSpecific + path: / + tls: + - hosts: + - fake.host + secretName: fakesecret \ No newline at end of file diff --git a/definitions/orderer/saas-ingress.yaml b/definitions/orderer/saas-ingress.yaml new file mode 100644 index 00000000..68f828cf --- /dev/null +++ b/definitions/orderer/saas-ingress.yaml @@ -0,0 +1,47 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: ingress-orderer + annotations: + ingress.bluemix.net/ssl-services: ssl-service= + ingress.bluemix.net/redirect-to-https: "True" + ingress.bluemix.net/client-max-body-size: size=25m + ingress.bluemix.net/large-client-header-buffers: number=10 size=25K + ingress.bluemix.net/proxy-buffering: enabled=false serviceName= + ingress.bluemix.net/proxy-read-timeout: "serviceName= timeout=300s" + ingress.bluemix.net/proxy-buffers: "serviceName= number=4 size=25K" + ingress.bluemix.net/proxy-buffer-size: "serviceName= size=25K" +spec: + rules: + - host: fake.host + http: + paths: + - backend: + service: + name: fake-service + port: + name: some-port + path: / + pathType: ImplementationSpecific + path: / + tls: + - hosts: + - fake.host + secretName: fakesecret \ No newline at end of file diff --git a/definitions/orderer/saas-ingressv1beta1-community.yaml b/definitions/orderer/saas-ingressv1beta1-community.yaml new file mode 100644 index 00000000..a2fa0af4 --- /dev/null +++ b/definitions/orderer/saas-ingressv1beta1-community.yaml @@ -0,0 +1,38 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: ingress-orderer-community + annotations: + nginx.ingress.kubernetes.io/backend-protocol: HTTPS + nginx.ingress.kubernetes.io/proxy-ssl-verify: "false" + nginx.ingress.kubernetes.io/proxy-body-size: 25m +spec: + rules: + - host: fake.host + http: + paths: + - backend: + serviceName: fake-service + servicePort: some-port + path: / + tls: + - hosts: + - fake.host + secretName: fakesecret \ No newline at end of file diff --git a/definitions/orderer/saas-ingressv1beta1.yaml b/definitions/orderer/saas-ingressv1beta1.yaml new file mode 100644 index 00000000..83b4e19f --- /dev/null +++ b/definitions/orderer/saas-ingressv1beta1.yaml @@ -0,0 +1,43 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: ingress-orderer + annotations: + ingress.bluemix.net/ssl-services: ssl-service= + ingress.bluemix.net/redirect-to-https: "True" + ingress.bluemix.net/client-max-body-size: size=25m + ingress.bluemix.net/large-client-header-buffers: number=10 size=25K + ingress.bluemix.net/proxy-buffering: enabled=false serviceName= + ingress.bluemix.net/proxy-read-timeout: "serviceName= timeout=300s" + ingress.bluemix.net/proxy-buffers: "serviceName= number=4 size=25K" + ingress.bluemix.net/proxy-buffer-size: "serviceName= size=25K" +spec: + rules: + - host: fake.host + http: + paths: + - backend: + serviceName: fake-service + servicePort: some-port + path: / + tls: + - hosts: + - fake.host + secretName: fakesecret \ No newline at end of file diff --git a/definitions/orderer/service.yaml b/definitions/orderer/service.yaml new file mode 100644 index 00000000..61440a5c --- /dev/null +++ b/definitions/orderer/service.yaml @@ -0,0 +1,39 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +apiVersion: v1 +kind: Service +metadata: + name: "ibporderer-service" +spec: + selector: + release: "ibp-orderer" + type: ClusterIP + ports: + - name: "orderer-grpc" + port: 7050 + targetPort: 7050 + - name: "operations" + port: 8443 + targetPort: 8443 + - name: "grpcweb" + port: 7443 + targetPort: 7443 + - name: "orderer-admin" + port: 9443 + targetPort: 9443 diff --git a/definitions/orderer/serviceaccount.yaml b/definitions/orderer/serviceaccount.yaml new file mode 100644 index 00000000..bb9557b4 --- /dev/null +++ b/definitions/orderer/serviceaccount.yaml @@ -0,0 +1,25 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: "orderer-sa" +automountServiceAccountToken: true +imagePullSecrets: + - name: ibm-entitlement-key diff --git a/definitions/peer/chaincode-launcher.yaml b/definitions/peer/chaincode-launcher.yaml new file mode 100644 index 00000000..1b3830b1 --- /dev/null +++ b/definitions/peer/chaincode-launcher.yaml @@ -0,0 +1,64 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +name: "chaincode-launcher" +imagePullPolicy: Always +securityContext: + privileged: false + readOnlyRootFileSystem: false + runAsNonRoot: true + runAsUser: 7051 + capabilities: + drop: + - ALL +ports: + - name: fileserver + containerPort: 22222 + - name: grpc + containerPort: 11111 +livenessProbe: + httpGet: + path: /healthz + port: fileserver + scheme: HTTP + initialDelaySeconds: 30 + timeoutSeconds: 5 + failureThreshold: 6 +readinessProbe: + httpGet: + path: /healthz + port: fileserver + scheme: HTTP + initialDelaySeconds: 26 + timeoutSeconds: 5 + periodSeconds: 5 +resources: + requests: + cpu: 0.1 + memory: "100Mi" + limits: + cpu: 2 + memory: "2Gi" +env: + - name: "LICENSE" + value: "accept" + - name: FILE_SERVER_LISTEN_IP + value: "0.0.0.0" + - name: FILE_SERVER_BASE_IP + valueFrom: + fieldRef: + fieldPath: status.podIP diff --git a/definitions/peer/couchdb-init.yaml b/definitions/peer/couchdb-init.yaml new file mode 100644 index 00000000..8267542e --- /dev/null +++ b/definitions/peer/couchdb-init.yaml @@ -0,0 +1,50 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +name: "couchdbinit" +image: "registry.access.redhat.com/ubi8/ubi-minimal:latest" +imagePullPolicy: Always +resources: + requests: + cpu: 0.2 + memory: 400M + limits: + cpu: 0.2 + memory: 400M +command: + - sh + - -c + - chmod -R 775 /opt/couchdb/data/ && chown -R -H 5984:0 /opt/couchdb/data/ +securityContext: + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: false + runAsNonRoot: false + runAsUser: 0 + capabilities: + drop: + - ALL + add: + - CHOWN + - FOWNER +env: + - name: "LICENSE" + value: "accept" +volumeMounts: + - mountPath: "/opt/couchdb/data" + name: "db-data" + subPath: "data" diff --git a/definitions/peer/couchdb-pvc.yaml b/definitions/peer/couchdb-pvc.yaml new file mode 100644 index 00000000..c56c71b5 --- /dev/null +++ b/definitions/peer/couchdb-pvc.yaml @@ -0,0 +1,28 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: "couchdb-pvc" +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "100Gi" diff --git a/definitions/peer/couchdb.yaml b/definitions/peer/couchdb.yaml new file mode 100644 index 00000000..d0ad8486 --- /dev/null +++ b/definitions/peer/couchdb.yaml @@ -0,0 +1,64 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +name: "couchdb" +image: "" +imagePullPolicy: Always +securityContext: + privileged: false + readOnlyRootFileSystem: false + runAsNonRoot: true + runAsUser: 5984 + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + - CHOWN + - DAC_OVERRIDE + - SETGID + - SETUID + - FOWNER +livenessProbe: + tcpSocket: + port: 5984 + initialDelaySeconds: 30 + timeoutSeconds: 5 + periodSeconds: 6 +readinessProbe: + tcpSocket: + port: 5984 + initialDelaySeconds: 56 + timeoutSeconds: 5 + periodSeconds: 5 +ports: + - containerPort: 5984 +resources: + requests: + cpu: 0.1 + memory: "100Mi" + limits: + cpu: 2 + memory: "4Gi" +env: + - name: "LICENSE" + value: "accept" +volumeMounts: + - mountPath: "/opt/couchdb/data" + name: "db-data" + subPath: "data" + diff --git a/definitions/peer/deployment.yaml b/definitions/peer/deployment.yaml new file mode 100644 index 00000000..c965927c --- /dev/null +++ b/definitions/peer/deployment.yaml @@ -0,0 +1,313 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ibppeer-deployment +spec: + replicas: 1 + selector: {} + strategy: + type: Recreate + template: + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: orgname + operator: In + values: + - "" + topologyKey: kubernetes.io/hostname + weight: 100 + containers: + - env: + - name: LICENSE + value: accept + image: "" + imagePullPolicy: Always + livenessProbe: + failureThreshold: 6 + initialDelaySeconds: 30 + tcpSocket: + port: 2375 + timeoutSeconds: 5 + name: dind + ports: + - containerPort: 2375 + readinessProbe: + exec: + command: + - readiness.sh + initialDelaySeconds: 30 + periodSeconds: 5 + timeoutSeconds: 5 + resources: + limits: + cpu: 500m + memory: 1000M + requests: + cpu: 500m + memory: 1000M + securityContext: + allowPrivilegeEscalation: true + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + privileged: true + readOnlyRootFilesystem: false + runAsNonRoot: false + runAsUser: 0 + - env: + - name: LICENSE + value: accept + - name: CORE_PEER_LISTENADDRESS + value: 0.0.0.0:7051 + - name: CORE_PEER_CHAINCODELISTENADDRESS + value: 0.0.0.0:7052 + - name: CORE_PEER_MSPCONFIGPATH + value: /certs/msp + - name: CORE_PEER_FILESYSTEMPATH + value: /data/peer/ + - name: CORE_PEER_TLS_ENABLED + value: "true" + - name: 
CORE_PEER_TLS_CERT_FILE + value: /certs/tls/signcerts/cert.pem + - name: CORE_PEER_TLS_KEY_FILE + value: /certs/tls/keystore/key.pem + - name: CORE_PEER_TLS_ROOTCERT_FILE + value: /certs/msp/tlscacerts/cacert-0.pem + - name: FABRIC_CFG_PATH + value: /certs + - name: CORE_OPERATIONS_LISTENADDRESS + value: 0.0.0.0:9443 + - name: CORE_OPERATIONS_TLS_ENABLED + value: "true" + - name: CORE_OPERATIONS_TLS_CERT_FILE + value: /certs/tls/signcerts/cert.pem + - name: CORE_OPERATIONS_TLS_KEY_FILE + value: /certs/tls/keystore/key.pem + - name: CORE_OPERATIONS_TLS_CLIENTAUTHREQUIRED + value: "false" + - name: CORE_OPERATIONS_TLS_CLIENTROOTCAS_FILES + value: /certs/msp/tlscacerts/cacert-0.pem + image: "" + imagePullPolicy: Always + livenessProbe: + failureThreshold: 6 + httpGet: + path: /healthz + port: operations + scheme: HTTPS + initialDelaySeconds: 30 + timeoutSeconds: 5 + name: peer + ports: + - containerPort: 7051 + name: peer + - containerPort: 7052 + name: chaincodelisten + - containerPort: 9443 + name: operations + readinessProbe: + httpGet: + path: /healthz + port: operations + scheme: HTTPS + initialDelaySeconds: 26 + periodSeconds: 5 + timeoutSeconds: 5 + resources: + limits: + cpu: 200m + memory: 400M + requests: + cpu: 200m + memory: 400M + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + privileged: false + readOnlyRootFilesystem: false + runAsNonRoot: true + runAsUser: 7051 + volumeMounts: + - mountPath: /data + name: fabric-peer-0 + subPath: data + - mountPath: /certs/msp/cacerts + name: ecert-cacerts + - mountPath: /certs/msp/signcerts + name: ecert-signcert + - mountPath: /certs/msp/tlscacerts + name: tls-cacerts + - mountPath: /certs/tls/keystore + name: tls-keystore + - mountPath: /certs/tls/signcerts + name: tls-signcert + - mountPath: /certs + name: peer-config + - mountPath: /certs/msp + name: peer-config + - env: + - name: LICENSE + value: accept + - name: BACKEND_ADDRESS + value: 127.0.0.1:7051 + - name: SERVER_TLS_CERT_FILE + value: /certs/tls/signcerts/cert.pem + - name: SERVER_TLS_KEY_FILE + value: /certs/tls/keystore/key.pem + - name: SERVER_TLS_CLIENT_CA_FILES + value: /certs/msp/tlscacerts/cacert-0.pem + - name: SERVER_BIND_ADDRESS + value: 0.0.0.0 + - name: SERVER_HTTP_DEBUG_PORT + value: "8080" + - name: SERVER_HTTP_TLS_PORT + value: "7443" + - name: BACKEND_TLS + value: "true" + - name: SERVER_HTTP_MAX_WRITE_TIMEOUT + value: 5m + - name: SERVER_HTTP_MAX_READ_TIMEOUT + value: 5m + - name: USE_WEBSOCKETS + value: "true" + image: "" + imagePullPolicy: Always + livenessProbe: + failureThreshold: 6 + tcpSocket: + port: 7443 + initialDelaySeconds: 30 + timeoutSeconds: 5 + name: proxy + ports: + - containerPort: 8080 + name: http + - containerPort: 7443 + name: https + readinessProbe: + tcpSocket: + port: 7443 + initialDelaySeconds: 26 + periodSeconds: 5 + timeoutSeconds: 5 + resources: + limits: + cpu: 100m + memory: 200M + requests: + cpu: 100m + memory: 200M + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + privileged: false + readOnlyRootFilesystem: false + runAsNonRoot: true + runAsUser: 1000 + volumeMounts: + - mountPath: /certs/msp/tlscacerts + name: tls-cacerts + - mountPath: /certs/tls/signcerts + name: tls-signcert + - mountPath: /certs/tls/keystore + name: tls-keystore + - env: + - name: LICENSE + value: accept + image: fluent/fluentd:v1.4-2 + imagePullPolicy: Always + livenessProbe: + failureThreshold: 6 + initialDelaySeconds: 30 + 
tcpSocket: + port: 9880 + timeoutSeconds: 5 + name: chaincode-logs + ports: + - containerPort: 9880 + readinessProbe: + initialDelaySeconds: 26 + periodSeconds: 5 + tcpSocket: + port: 9880 + timeoutSeconds: 5 + resources: + limits: + cpu: 100m + memory: 200M + requests: + cpu: 100m + memory: 200M + securityContext: + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + privileged: false + readOnlyRootFilesystem: false + runAsNonRoot: true + runAsUser: 1000 + volumeMounts: + - mountPath: /fluentd/etc + name: fluentd-config + hostIPC: false + hostNetwork: false + hostPID: false + initContainers: + - env: + - name: LICENSE + value: accept + image: "" + imagePullPolicy: Always + name: init + resources: + limits: + cpu: 200m + memory: 400M + requests: + cpu: 200m + memory: 400M + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - CHOWN + - FOWNER + drop: + - ALL + privileged: false + readOnlyRootFilesystem: false + runAsNonRoot: false + runAsUser: 0 + volumeMounts: + - mountPath: /data + name: fabric-peer-0 + subPath: data + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 1000 diff --git a/definitions/peer/fluentd-configmap.yaml b/definitions/peer/fluentd-configmap.yaml new file mode 100644 index 00000000..d58b70c6 --- /dev/null +++ b/definitions/peer/fluentd-configmap.yaml @@ -0,0 +1,32 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: v1 +kind: ConfigMap +metadata: + name: "fluentd-cm" +data: + fluent.conf: | + + @type forward + port 9880 + bind 0.0.0.0 + + + @type stdout + diff --git a/definitions/peer/ingress.yaml b/definitions/peer/ingress.yaml new file mode 100644 index 00000000..2139607d --- /dev/null +++ b/definitions/peer/ingress.yaml @@ -0,0 +1,25 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: ingress-peer + annotations: + nginx.ingress.kubernetes.io/ssl-passthrough: "true" + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/proxy-connect-timeout: 60s diff --git a/definitions/peer/ingressv1beta1.yaml b/definitions/peer/ingressv1beta1.yaml new file mode 100644 index 00000000..5819fccd --- /dev/null +++ b/definitions/peer/ingressv1beta1.yaml @@ -0,0 +1,25 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: ingress-peer + annotations: + nginx.ingress.kubernetes.io/ssl-passthrough: "true" + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/proxy-connect-timeout: 60s \ No newline at end of file diff --git a/definitions/peer/pvc.yaml b/definitions/peer/pvc.yaml new file mode 100644 index 00000000..7b94262d --- /dev/null +++ b/definitions/peer/pvc.yaml @@ -0,0 +1,28 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: "peer-pvc" +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "100Gi" diff --git a/definitions/peer/role.yaml b/definitions/peer/role.yaml new file mode 100644 index 00000000..cc651fc9 --- /dev/null +++ b/definitions/peer/role.yaml @@ -0,0 +1,41 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: "peer-role" +rules: + - apiGroups: + - "" + - "apps" + resources: + - pods + - pods/log + - secrets + - deployments/finalizers + - pods/finalizers + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection diff --git a/definitions/peer/rolebinding.yaml b/definitions/peer/rolebinding.yaml new file mode 100644 index 00000000..f8bdc138 --- /dev/null +++ b/definitions/peer/rolebinding.yaml @@ -0,0 +1,29 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: "peer-role" +subjects: + - kind: ServiceAccount + name: sample +roleRef: + kind: Role + name: sample + apiGroup: rbac.authorization.k8s.io diff --git a/definitions/peer/route.yaml b/definitions/peer/route.yaml new file mode 100644 index 00000000..a7451d8e --- /dev/null +++ b/definitions/peer/route.yaml @@ -0,0 +1,33 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + name: ibppeer-route +spec: + host: ibppeer.ipaddress.nip.io + port: + targetPort: http + tls: + termination: passthrough + to: + kind: Service + name: ibppeer-service + weight: 100 + wildcardPolicy: None diff --git a/definitions/peer/saas-ingress-community.yaml b/definitions/peer/saas-ingress-community.yaml new file mode 100644 index 00000000..2adc6442 --- /dev/null +++ b/definitions/peer/saas-ingress-community.yaml @@ -0,0 +1,42 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: ingress-peer-community + annotations: + nginx.ingress.kubernetes.io/backend-protocol: HTTPS + nginx.ingress.kubernetes.io/proxy-ssl-verify: "false" + nginx.ingress.kubernetes.io/proxy-body-size: 25m +spec: + rules: + - host: fake.host + http: + paths: + - backend: + service: + name: fake-service + port: + name: some-port + path: / + pathType: ImplementationSpecific + path: / + tls: + - hosts: + - fake.host + secretName: fakesecret \ No newline at end of file diff --git a/definitions/peer/saas-ingress.yaml b/definitions/peer/saas-ingress.yaml new file mode 100644 index 00000000..e242757a --- /dev/null +++ b/definitions/peer/saas-ingress.yaml @@ -0,0 +1,47 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: ingress-peer + annotations: + ingress.bluemix.net/ssl-services: ssl-service= + ingress.bluemix.net/redirect-to-https: "True" + ingress.bluemix.net/client-max-body-size: size=25m + ingress.bluemix.net/large-client-header-buffers: number=10 size=25K + ingress.bluemix.net/proxy-buffering: enabled=false serviceName= + ingress.bluemix.net/proxy-read-timeout: "serviceName= timeout=300s" + ingress.bluemix.net/proxy-buffers: "serviceName= number=4 size=25K" + ingress.bluemix.net/proxy-buffer-size: "serviceName= size=25K" +spec: + rules: + - host: fake.host + http: + paths: + - backend: + service: + name: fake-service + port: + name: some-port + path: / + pathType: ImplementationSpecific + path: / + tls: + - hosts: + - fake.host + secretName: fakesecret \ No newline at end of file diff --git a/definitions/peer/saas-ingressv1beta1-community.yaml b/definitions/peer/saas-ingressv1beta1-community.yaml new file mode 100644 index 00000000..cf6a4764 --- /dev/null +++ b/definitions/peer/saas-ingressv1beta1-community.yaml @@ -0,0 +1,38 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: ingress-peer-community + annotations: + nginx.ingress.kubernetes.io/backend-protocol: HTTPS + nginx.ingress.kubernetes.io/proxy-ssl-verify: "false" + nginx.ingress.kubernetes.io/proxy-body-size: 25m +spec: + rules: + - host: fake.host + http: + paths: + - backend: + serviceName: fake-service + servicePort: some-port + path: / + tls: + - hosts: + - fake.host + secretName: fakesecret \ No newline at end of file diff --git a/definitions/peer/saas-ingressv1beta1.yaml b/definitions/peer/saas-ingressv1beta1.yaml new file mode 100644 index 00000000..d81f5fce --- /dev/null +++ b/definitions/peer/saas-ingressv1beta1.yaml @@ -0,0 +1,43 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: ingress-peer + annotations: + ingress.bluemix.net/ssl-services: ssl-service= + ingress.bluemix.net/redirect-to-https: "True" + ingress.bluemix.net/client-max-body-size: size=25m + ingress.bluemix.net/large-client-header-buffers: number=10 size=25K + ingress.bluemix.net/proxy-buffering: enabled=false serviceName= + ingress.bluemix.net/proxy-read-timeout: "serviceName= timeout=300s" + ingress.bluemix.net/proxy-buffers: "serviceName= number=4 size=25K" + ingress.bluemix.net/proxy-buffer-size: "serviceName= size=25K" +spec: + rules: + - host: fake.host + http: + paths: + - backend: + serviceName: fake-service + servicePort: some-port + path: / + tls: + - hosts: + - fake.host + secretName: fakesecret \ No newline at end of file diff --git a/definitions/peer/service.yaml b/definitions/peer/service.yaml new file mode 100644 index 00000000..5aab015a --- /dev/null +++ b/definitions/peer/service.yaml @@ -0,0 +1,36 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +apiVersion: v1 +kind: Service +metadata: + name: "peer-service" +spec: + selector: + release: "operator" + type: ClusterIP + ports: + - name: "peer-api" + port: 7051 + targetPort: 7051 + - name: "operations" + port: 9443 + targetPort: 9443 + - name: "grpcweb" + port: 7443 + targetPort: 7443 diff --git a/definitions/peer/serviceaccount.yaml b/definitions/peer/serviceaccount.yaml new file mode 100644 index 00000000..d642e5e9 --- /dev/null +++ b/definitions/peer/serviceaccount.yaml @@ -0,0 +1,25 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: "peer-sa" +automountServiceAccountToken: true +imagePullSecrets: + - name: ibm-entitlement-key diff --git a/docker-entrypoint.sh b/docker-entrypoint.sh new file mode 100755 index 00000000..f66dc19b --- /dev/null +++ b/docker-entrypoint.sh @@ -0,0 +1,45 @@ +#!/bin/sh +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +if [ "${LICENSE}" = "accept" ]; then + : +elif [ "${LICENSE}" = "view" ]; then + cat /licenses/LA_en + exit 0 +else + LANG_CODE=${LICENSE#view-*} + if [ "${LANG_CODE}" = "${LICENSE}" ]; then + echo "Please accept or view the License by setting the \"LICENSE\" env variable to \"accept\", \"view\" or \"view-\"" + exit 1 + else + cat /licenses/LI_${LANG_CODE} + exit 0 + fi +fi + +# Search for environment variables named SECRET_*, whose values specify a file +# name, and create a corresponding environment variable without the SECRET_ +# prefix whose value is the contents of the specified file. +for secret_entry in $(env | grep '^SECRET_'); do + name=${secret_entry%=*} && name=${name#SECRET_} + value=${secret_entry#*=} && value=$(cat $value) + export ${name}="${value}" +done + +exec "$@" diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md new file mode 100644 index 00000000..539d52f8 --- /dev/null +++ b/docs/CONTRIBUTING.md @@ -0,0 +1,3 @@ +# Contributing to this repository + +## TODO \ No newline at end of file diff --git a/docs/DEVELOPING.md b/docs/DEVELOPING.md new file mode 100644 index 00000000..ffa4cfdb --- /dev/null +++ b/docs/DEVELOPING.md @@ -0,0 +1,143 @@ +# Fabric Operator Development + +## Prerequisites + +- Golang 1.17+ +- A good IDE, any will do. VSCode and GoLand are great tools - use them! 
+- A healthy mix of patience, curiosity, and ingenuity.
+- A strong desire to make Fabric ... _right_.
+- Check your ego at the door.
+
+
+## Build the Operator
+
+```shell
+# Let's Go!
+make
+```
+
+```shell
+# Build ghcr.io/ibm-blockchain/fabric-operator:latest-amd64
+make image
+```
+
+```shell
+# Build Fabric CRDs
+make manifests
+```
+
+## Unit Tests
+
+```shell
+# Just like it says:
+make test
+```
+
+
+## Integration Tests
+
+Integration tests run the operator binary _locally_ as a native process, connecting to a "remote" Kube API
+server.
+
+```shell
+# point the operator at a Kubernetes cluster
+export KUBECONFIG_PATH=$HOME/.kube/config
+
+make integration-tests
+```
+
+
+Or focus on a targeted suite:
+```shell
+INT_TEST_NAME= make integration-tests
+```
+
+
+## Debug the Operator
+
+Run / debug `main.go` with env:
+
+```shell
+KUBECONFIG_PATH=$HOME/.kube/config
+```
+
+
+## Local Kube Options
+
+### Rancher / k3s
+
+[Rancher Desktop](https://rancherdesktop.io) is a _fantastic_ alternative for running a local Kubernetes on
+_either_ containerd _or_ mobyd / Docker.
+
+It's great.
+
+Use it.
+
+Learn to love typing `nerdctl --namespace k8s.io`, providing a direct line of sight for k3s to read from
+the local image cache.
+
+
+### KIND
+```shell
+# Create a KIND cluster - suitable for integration testing.
+make kind
+
+# Why?
+make unkind
+```
+
+OR ... create a KIND cluster pre-configured with Nginx ingress and Fabric CRDs:
+```shell
+sample-network/network kind
+sample-network/network cluster init
+```
+
+Note that KIND does not have [visibility to images](https://iximiuz.com/en/posts/kubernetes-kind-load-docker-image/)
+in the local Docker cache.  If you build an image, make sure to load it directly into the KIND image plane
+(`kind load docker-image ...`) AND set `imagePullPolicy: IfNotPresent` in any Kube spec referencing the container.
+
+Running `network kind` will deploy a companion, insecure Docker registry at `localhost:5000`.  This can be
+_extremely useful_ for relaying custom images into the cluster when the imagePullPolicy cannot be overridden.
+If for some reason you can't seem to mangle an image into KIND, build, tag, and push the custom image over to
+the `localhost:5000` container registry.  (Or use Rancher/k3s.)
+
+
+## What's up with Ingress, vcap.me, and nip.io domains?
+
+Fabric Operator uses Kube Ingress to route traffic through a common DNS wildcard domain (e.g. *.my-network.example.com).
+In cloud-native environments, where DNS wildcard domain resolvers are readily available, it is possible to
+map a top-level A record to a single IP address bound to the cluster ingress.
+
+Unfortunately it is _exceedingly annoying_ to emulate a top-level A wildcard DNS domain in a way that can be visible
+to pods running in a Docker network (e.g. KIND) AND to the host OS using the same domain alias and IP.
+
+Two solutions are available:
+
+- Use the `*.vcap.me` domain alias for your Fabric network, mapping to 127.0.0.1 in all cases.  This is convenient for
+  scenarios where pods in the cluster will have no need to traverse the ingress (e.g. in integration testing).
+
+
+- Use the [Dead simple wildcard DNS for any IP Address](https://nip.io) *.nip.io domain for the cluster, providing
+  full flexibility for the IP address of the ingress port.
+
+
+## Commit Practices
+
+- There is no "Q/A" team, other than the "A Team": you.
+- When you write a new feature, develop BOTH unit tests and a functional / integration test (see the sketch below).
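+
+  A minimal sketch of a pre-PR check, assuming a cluster is reachable through `$HOME/.kube/config`
+  (both targets are described above):
+
+  ```shell
+  # run the unit tests first
+  make test
+
+  # then run the integration tests against the current cluster
+  export KUBECONFIG_PATH=$HOME/.kube/config
+  make integration-tests
+  ```
+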
+- When you find a bug, write a regression/unit test to illuminate it, and step on it AFTER it's in the spotlight. +- Submit PRs in tandem with GitHub Issues describing the feature, fix, or enhancement. +- Don't allow PRs to linger. +- Ask your peers and maintainers to review PRs. Be efficient by including solid test cases. +- Have fun, and learn something new from your peers. + + +## Pitfalls / Hacks / Tips / Tricks + +- On OSX, there is a bug in the Golang DNS resolver, causing the Fabric binaries to stall out when resolving DNS. + See [Fabric #3372](https://github.com/hyperledger/fabric/issues/3372) and [Golang #43398](https://github.com/golang/go/issues/43398). + Fix this by turning a build of [fabric](https://github.com/hyperledger/fabric) binaries and copying the build outputs + from `fabric/build/bin/*` --> `sample-network/bin` + + +- ??? \ No newline at end of file diff --git a/docs/images/fabric-operator-components.png b/docs/images/fabric-operator-components.png new file mode 100644 index 0000000000000000000000000000000000000000..dd7713ee626b195af63802c74ac1c62f4135e055 GIT binary patch literal 116040 zcmeFZby!tv*FL)Fl28N$DUt5(6zP&~5NYY|G$`qA5CM^HX|U*yMTf+q7qEau^P9N) z{q}yh=lpZdbFiEW?C7;MiN>V;?cCfIvH3xxY!xEFwH12{)I-z~X(lh286^HXPh$BAnMpEWe z`JXWr2vl!lNK?KI4GpBSdXFhB8LNY*%8ZPFqA|vN2RY#$b6|MSeQFLiI!Fs_{yM{9 z7wXFen{(K0f%H*;?t~;JB!I9GV!D)8PX%W6tG2}PnBLS18amyav0uSXL< zgq(P=L?K$aH5a$&ZDqr+i(d-FGT4GB9bV!KFl;}5*>tO0RK zdX|jI4qP{IUcb15KXR4f;D*)}I|~vsGccsH#sl37LU2U-)_%(j5w!x1IR^Ah0(tR4 z0wxs#9Si=4h!YMfVd--}~f2r+ZkQ2q;ge?~>6(k4ST<9@DdF&@*CwmD~|0$mw}f`<_sP z=mV}B(N~%N-1)ppE5@{-32FTt!yyDKUP0m(B*T{g;hdS_mi4A}j6>lwuGhh_->jbX zag?PzRph2cijQXaJZ3V@4rYIt!q7)W7?E$PSy`Y%WQ)#=(}b=6QY75&tGj7PMf@ZR zDGpuG=oj7Z&5q0ljI0zM$R3y;#C&hkKec?0-*fgvYQ`76JNOmyy?0OPMB&8bg4;ip z^6?Ye6#Qm%(ob5CIF_h$BOF86rF3X9@ioJe9tS)lbt0?o@98!FvQBvyr&jJ`JUN|>7yLGLWSa9#v)mVz=PnJ@R$}n1*?!J`9og0 z9JweaA1Ycak{#bV#yZCDDJ>D-&Ik<;%|n-=R#|!3r5P*782&-Fwy>(Gsfex!{9c`# zSt)T%J|d4`M0pg|3ig)jt@PWVx2bOv-x5iky52Q>wJWcZLo_-#BC|p9#q5jiSE{ea z;kBXI5iDT}VKbpJ;Y6YJ;nAT1F;8Xr<-aJ9#;nJ9GKVFN#T>@=#R$`SDLE^L)0#i{ zn#33*$aM6CBxxe%IB}lV?@^<%w}iCTH(gq}wa`AzQ~}+_l1SSO+oElnDP4gN8HolZ z21T>x4Uk)^uUQ@jsSd20_w_3DHgy$tgS&9M*qGikBGa!h)-VcdQ8RK9EM4AwY)@A|&)eRy%>2a6)!Vo|lc_qxS2#p565 z)H^?LjW3T!f53g-PV$X}gCvo}gWE(2zh9?6FHKVK9t#&snO5mU3a{>rmXKkSuFs>8 zvgO?P;?Z1~nzkID20YL{d)HFLv<0~(-h26s>dgME;mq)?9Gm;rBx)im{1(G&<<~;k zLfFQ{)+ACSWL%97iaf{M^QN<=7u{dRURsVkFFtjY1us2bB|`I=HGTdv&h_R>~f<6E9o)nR59BypK9GIb}U}j^0o1@NpdSa zQok>CKT0XHzoxX&E@OvbCfVKFUAT$2iN?p}eCK!$haSx`1Th3b=5{Bj6SI>}8B=*! 
zxnCJJ@NHlwqsfq+b(a;*;G2QD!Bkmf8O*-GerC>lu6x&imta>#gj?8?+%T)6FdZT> zD6^Fab^&{MBCu(fzc7k!(QJut2{`CD^E}r$6NZh$`;T{Ed1rRVHV68L^)R+$3fw1H zvRLaFcW>Rovb|-1>5R^Z@f~do)d9DajEeZq{So4DEbLpQSca2}AD4z+7qKr_TvQB4 zez?!YH$O|=z+f`^wLdsrH+}Gx>8BT;(m#n+HS5%tyg~%JjlZF_k+=cn zMC2MU$Fy|jIPVi8-le0JLo*t*)F;wytLb{NAkgjlTV#D z1gbk0KPj&$*)zS}v_9c+wvZT5U&Z!(LA+9Kzw&bOa`rM0tpt}l0X^9)QIY&mc+#kc zo`v4e#|oC4ZZ2&;9ugKZiUaEv=@E*iCz}dbF_lvMDw}U;ZjNyxkPWcGw~QpfYOK_IcK+pJ}FRyBNgFOYS z1@|p)S+MpA_Tz{y?D=j=jYOW3Cg$enetls@<77?~_SMv{5|N&I z@jX4r2#h3-A+e4By*Kp!#Kknl6>1#U#ir{OQn^NR8W&;R$N&~rd@v-v zNwS$r%pVoebn!Xw5=#QRgVpMQ-vlTZlgxGGER>W$jKDEE2nFE|2njes06swoBp{R@ z#~_eA!u{Wm)ez`^y$2Bl3b6(u|9X!e@bl&)2KWM2|N0Xt?llMv`0Wnx^)d(Xx4Y4X za*%#IMhXS4fh5!=<>Y{$>ZZ=-=JqaD4z6>jfwI5}3`bdA7Z8Y$=H?4QPK_EUZxJW0 zHFR8cloSO`9qd?5%p9JXvwGS&-s}Ss_7nsT?aW%&>pv*jyZkB^ARyb#6*dl5cDBD;=4x&656f<@{IcwaT)&VL zzS&IhiM6M>tI{<2cHBnA>c6Q+(bpFSsKREqmsiup$v!sI^u+mlZ4`uzj_|F&r zbKwt~x_{6VVCVib$v<8B)5;qe1l7!49BkcgFx0fSb`|9mX8XI-|G7%{k7c49+yJc~ zi~fA}KUe7d?TSC2{m&ID&ei}9CO52!a{Nl+&*y%zatg_z#dhdS${TEdK7tsH;(0}v$|8@EQjiLV%NdGq}{r@&8QSxB^kz+9M z`5kSONrfwW1^F!t5aXB>f?j5Z=KW*B4t^8}@_sUl${F-)g_KL5(*bkyN!HV4OI1P7 z3Hh!>J^07=e>ni41{pl)dEi-{qH5)kE)4A95nZ;gZbY}9Pt%Q zJn`sRAC>Og|6Yz60QJcKCz%xo>R~DSeFlNw0E=-9inojwK(ka#|C(xv`cH^ca-#tH zUGsYg{)<-szEVaY%)x?#UN{T>dpQ|2beA%aJPjUc@bKRa16c>Kzd#8Avfr5h=OyA+ zfVOe_&!x+LLfeSU;vkXsoD7_QlKzjJ`+pG!S+Yf#)9)})K0|gV)q7|IXRYng=a*q% zNQEAUG6n9o0f(I-iO7tdK4`QLp1A2~p$jFANj|1jhg$Dh#u9v+~7a(JxTzvQeNRa2`0 zJWwGB{6t)!6b2fvkWRmMcmE5YznM=MV1rq*;y5yYLH}>VAUbZ)*plk4KTx1<5&u@B zOa0>X!Gqtn`%j85LvFn8m5eI=Pv``Mh(QE+&&wGn>Yq^HJq*Cg^U~#1|CZUm#<@GJ zfR!(~lZO4h)Bc%jJZT{QeO|XvyZaLwe2Il8uJVyg<|iWi%UB??vp1&RMfnL&TS6?t z_ox43+gO$x<42=3on{^o=K3?ce*r$10T7cLdcQ&X7i|Vn;!QePMV8DojrGWN`x4}v zwa&bt)HXx-Cl)Ak5nY$Eo(M!q{)9O&0hVs!Cd6>-CzSQ*#@jTamVZK5pX32c*O^ns zeDRy-Gu#LdaV8$Ma9T^1_$SWLl>mNny%xH7euAGsfFHDb38VjLxPQ3h%K|`gL(L8= zB>#lk9~@D(1IB}Nt7qdUo3_+Bl@OQOPN#{H{R*U1XBP?zv z9x;ndaaMow5B5rFfTCu=$zXsb)9@2q;e{513_o^ihF|`~&7bscRJCA)8S)b%XankT zN_-~v4{ZJcA}FYW5D1JD^~s-pLOowJ0UiwUCvks5J0&-X{AefN@gF_tj|)x&vSeVg zsXw-D*MHiX%i|1~qK$bjX+90$i}OMizon-?7XEsKrv{jUXTlBU~BbYI?mb6id3IZfW1H>XuU2Wx9 z-fSMF)ZCOwSa%{#S;URe;AJ5Se|`<|%hy1E{0=lYblYo&f&1qCb41I*?6|x_-0mqC z3-7gx#7omc+}C;>U2Hd3ZzFD~SyuF<()i)nq;a_*bbEV=Qh`2I#2^pF?-dvc*UGVG zPTwOCR{6B$%z%uCZW@F|SMte_bAc_5b$zTEZ;aAATqBjne64E2`|6jF0>nPy&$K^c z6r~l4=WCp4eaUZ+i~|FW*B3rNWs{&rrpF;&ph1g*5(ck!Tv<~G0B{A$EWRl^e7EuD z6bJlnVf9Q%0cmqX%{=B}Dnftjl+G2%#B?1kBgs)j(H9xW+TZWothvq3hRl=W-`!`# zo#?J+L9a}iiHmv@w8WbMx{-XuXuu&o*4{0$9*^i|v>*^d8|6og+i|ZPah|MS=dqw^ zJ?_eUmvXb<1Zy42Oqb*TEP16zBT(EMea;5L38aV-)jTmD-R7y5m+g0icz0lzZgIQ zP|^whBJo7_t*zVQlm^HEFv&u40>KghFzmA^2Y8zZm^%f2#TXegiAb{^+)8O%y+V>w3|Z-Q>8`zojPpD zN_cZjDP=*f#g7J^^z!KLrr%_lAauM%VqQdhPN~NrgeBrMTm-~0BFe>NPsxE8_8>>B zC)=XU!PRpT_kW2L+(%nhY$kfzi?$q+xA@++`D z(=EJrW|+X(`C@<*NZtR@bOb%Q3-EatO5!CA3KGB3xs~bwQ4}Kb#i#wU*Y2s@@5@-O zW)U!(9-~wd;7)_SPPZuJMY?2hNa`U%j{8l+0sXuT3%vB%Tq$yK-v2IDsG&OoNl-gw z6zuIoP6kpJXd&8)ja-Rnhk@%Z_MRsHO$UMJf#8Sner`Y99coX<_44uNtjd&v;txep z83bBRqF;la_#)$l(zbKCa^KOSe2Rg$xBLRH4>9*{&faC)JHuMb-Z<72ukQOl%$5q} z5T1ZKJebiS%v>yB+&D(Ok`xT5Ip`zx=K(wb?Fr9xMCaHPjrrLlgl5Daf$9#cV|UGC zFon?Yow=l^H&Q&V0}WQh`du@KAfkU^1qg48+D0-XAo|C;wMjPAbdcXrPGB`8>5gNt zqmPgGxCxCxp6FJ1mn#e^(UmoiqtT4+?51&IloL!OH$MGFc#NrNPA|PDTmt2PB}sXR z>>Cm{1Z5`MO8ODB@frYsm)>nDTmkUlL&(ZqUi)IS#D=Hd9z9XD6tEy!`%)066!l%L zkgL!}O7)q5^6tCkd2%7Yp>F9qW@7X#A(Flu_lSKE!y!phzMy*MBN5W|0Obefv$#7X zrz1s51F0oq6-w|f>Kmh)MD{fXJqKOB-8yHQJW%le5&p}v0I{_PleiuRihE)J`Zt2d zk9@faI~V=@RQ$-nEaFawhYtiCeUo40AzX1=dx}5<*>7s2$1#CW=zBvMUhm7OP|ENe 
z7RI(*BuAT?)u{Ns6v$a8&&{(RbS;D}p`KeDCWR55$urfz>R9$h$Bk2W2@sI)umDiM z#SoAP#l+jg8on=6M;7!H^NB4_>EO;g(=40$U;3?Brkpl3*L%1^J6F+K>vdA>iWoU` z2dp}C6vq5Z88qgv-~LjVidO*cnXaN}m;HV_70TY|j5{GD)q10;;)m^A+v85FB>>#> zh{BQ~@XLHx4lKOAh^be#xiomUfH;z--4t7oFG)Fh=Z4XsF2bP&RR(qGSI>{&$c7DQ zgODJg34(t8s-8vifMJ9{er1aPrXi9g3A)QL>Uos3bj7NWM(RlL!!%azItD;tx6(5_ zf^7hB>2JW5_lRgleFT6@c~?ND0-f3;@OwLk3V0`#D<6xH@Zd*-7t@DWq*k!E8T;k( zoGP`Na6g$YFe@}VP`j6GG>%J(L?yW^jGrasrskJ>`1ExK-sSLPrbBwPD=Eq>#I55i zRu`=R^UH1i8rm$GY9YjKKyKTEk1m>kGVlR2`@ABp_U0q{N8`midl3=$&XKFdol)s$ z6IE*xP0n14UZjRM|H!!1mEvjKMUO*{hk)IwzeTS)BM+H&G>jA|Si%LH*moNwJAlgj zWoY1d@HZa9vwW;hNot50e$e7=ey`({jK&Lm;RY(YAFgBzxYA9d{R{}?g1lsPui}A} z;@Fs;@Rd-u4D@xFD`bb+vzF6Ui}Od#jW>*_*Ue-&tr69cik0<1EF-axx&eqJk7JAl zn+9EK=E6s-Uklt=^a+Z=g5hqU{GMe7>5m?WS_81Gmpx9TH%Ue}(9%j2^ERb;I~$_C zGcqAZ1LECIWeV3Co=}xvij{Wl_RI67$qmWYbX{dspflrj}{WW0&q_R)Kyo&YAXS&mDw(h4h_cNiwK<|t$d}p33}B;V3Ic093DN1 zOOA5WMSrJU-2HZ$Q;euZiesK!OxfDo~$@GjX<1c`w`jN(OgnV z>i6$IcF~q>-}0G1x(whc`zmk)O$DK&Ux>F~{cE>e1_Nrq!6UvjDW@VaENVV@wLgd? z*L%|v1XbY6ZkkAc!eMVmp$a5_`b7C}%X#tlHvwW?NTzj}Q9O{UOl|v@+kOleRn}YD z96t$gO7(F68VBBEc>0Ajw9Y&#C0!xFRRHAz;GzrgYT?Cs+UNundnAahIrWm#E=^I*2p8 zod9_@+XfD~#0;q{s*ng@r%Q4TAo2KqJnH zp3vvfw2u3aPt}3h%lan~;oXP%f8OX<7XFih?7JJMLUiW8Nuq%H{g-dzp1>n=+~{*z zhX35;Z|K~)83V)yl1BaaJ#Vh!k>5-^mPOSt|74o~KqBZd&}gVoQ-=Jl2L37lF!$Nq zqeuM%g*ckN@;vH2Rxv zfQSk5ezeN}GXvO?o59}r&O7%1!sl-sV-Ns?z5k~tIh1ZXiLd`aL5}_jy$?e-;C60b zZ&_E-E+sbH{U@_;9|xwR{yZ82bidy5_@#ImGs@$D|JTodzx@v<5no65<##QyooSUB zd8f=$n_a?4z%;wU2kVT^PB6(Y$*~)7ye*I_?3PtxSTDtE zKhK(8zgu<8WK*l+5zg1gX`ySVGV#t6^EIQyE(c986(%`;5 z%Y;vNO@E))o(paoLMq^G;0)=pUxfsnGs;Ik_xQ@7qTPry^P@-jbAX_{xG~BFk^IM% z=o!LZddHVOPdLqcD7~B^q2`0_4)OywXsz5F*&KIz?zwd9{%HCE8_lJ3TIYEU<( zD-OZAhVTXY1I>%|i7nBo$;30uYZu61=RI)>9{0xPqk+zQHQo`)6&gp*ux?l z#64)ss8Jb{Zz)yt^&wFq!n^7~hp&&0@k7zXbOv9%paeF3!zM$V|3)3K7 zUr$(FU+|(l%&a@E+a|pw2fmgc1)Yw#Gt=Va9&WO>x0H8q+?1UXj=Q}2rf6(2-J*IndaNQxe)We`hO68ke?4uS1K2T+W5*Kj@OPv2QTy#a4}^!7!` zk0XU)2H<-yAgw|wDZ(=3MMuFIf^O3u>Q&6q*Uflda`vd-P< zYGdzKA%oqbiN5ET6r-nzmAju_VW}0W$`Es#xxVRef%CZUUTGU`4t2JKd)-J8#D057 z-bevh;ER)npK-##u|tQ3TlRFTa#N-0!?0y~@x>(^WEyE?fP_Y;|oqCS#cXps~QM*+yTVEg2 zbKWu|)JfCX7njdu{<&ahn6<@X&f?T&N{_)qzsPFlnZA=@uCH^zYWM_ep-OgQfKu-v zje>TUoVIyTVlkiWxQt%U2QfN@MptW7A;aK0w*bo&W}W6S0WGNhXK&ozOah14!;R7I zH5Q>&As2_*H)&DX{2SZ7Kjz@#C+K5k6Vg|wE|Kr#;`RKEL{yzu<)+Ee%c-Z{S)iTT zbj#b+qXb09f07E$vrJ=JP2WMM&JRTONWN^9pD39%^XJtQLxW$_fr;4YX&jgt`4U(e z381V@ugZFp6Q(dvZ7xqf4~~>c;SPrjM}vzPUr09-9GJ_?B51K1(=jELCMO1z$7^2O z#za`ieh@x&?kctTShg8GXXE$nj`O^u@Y?JYOt~_+_E}oIgasZ@-U1VXo5z)(R2L67 zxC-*eh`WeXti*pi_r=4`eh363m%gr>jDipoLd34Qnl|RxSP}R_}2DI5QDU^;8 z*luJLJB9d&P9Dhl!)gT$&uR{&%Zc)xded5) zq-LwM`1##eUcQq{*W)x(gw|g~tg%)GLmEGi7+)PX8Gp0~u6}g&+7L~y{h;pwdxTCweKz!B6p<9H>v_A_`qyRTTxXP${+KU3tKe{ith4&M1{HCkH0Q=18- zsRs;-$xByiwAu=IWa&J09B8%YE_N7gCd<4wCHu`@EuKEW%3@w+F_Pu720;e)aluw;Xs`J{(rZ&NPz_q|-qNs!Ikp&WOiMF$wxC zj|f+xIq&FSXr)m8K77X@m~3^6uHo$1gF*5boj2KUku}yO_)xqDq68yjks_|-`7tRT z_RH28yZ5$pHKUCQbtL4xj&EGICm3^+aZZ<&N#VDYT)%%|Xj`@3>objOdphRg{I$t` zAFyUS{<8%quj8U9=M~2>JYT(5OFc1~XmA-L--7pxcbo`^nKQ>dH4cy-$YNEqktHV%BED&Pj`iNss;;M?0V0JVWOExG?a9?_RU`h^0)`g1FTzDVf+5r2eJ9uF zomCPsjY4dar+NDBjT0#D)l0P~AHXS8MIQNa*amYu=xaN#G=8%QAZ$hf20u*W*&4aCQ!WbeWyXUWw-_t%B%H|HR%KLR(kLA40JCPeC@8=X;LbVE%9#(9wVfT@IM=UPNT6~H{$gC z!Mjoii-9ZM<6R-E+Gj0{wbj!|4Q zpHGad3m#s+;$U&xc1~=nt(kR`Aui@EBySt65uW~N9b$dC;x*;7zIXfd6udgHQERTa z|Lj$Uxy5fKB`^;kqBMmYWjQrgrv|Pk?7yI~9GEn2*JPI%rhr^q5S%t|95hR%?{d0* z%CYL$zk1Pj{Cvu&{P@Li%0%e`kSguy>H4_x+iLa-#onp34X$k6Zn@lX{(3Zh=k%OH 
z*z{`LXH%kh^&+!AId-eOmrYQ*p@3m@EuzjSthaiVfbF?^SImPXk*1l9O%lzK-7iN1y9RATF_O{Z|=Mw(smlVYC@pI>y()_nO z#gNKTpeB4L3GwI1g7r;RtG(4R@zE~6W!gCyvuilRzvXMGRW`kjJm{tm6y+`Ma#5U~ z=7A``c@QNz;(f4Pe7C8kfXv}(g+k3@L-laqN>{3qbQF}yBk2PRq|av{o=&-UC@60= z(!QNRv%Ez7uVfdC7P*cw?nD2a0a^Cn(!=S$;-7tP(*5kVhFFF%- zKTn#g+RVnT@;|cZPNWN&)hYOx(AbdTW)JyPMl6I&#A=Wr#F&`ar_0s-+~p#8s;%34 z6vZ!W1l;>sWgg=CpzJe!>4{CITjb@MUnRMAj-zP9M$#R%wYs=6E}8x-7+9~kJ`d>} zS7>G;1-5mvmsS(NETW7ANvoMTrx9aIu3nouly(`oK)HlFx(#Kx9EC?#OWx%YaIQiq>{zbAP$qNO>z#w_ z)WJ-;ri7+ti_vGojW4p*b&5T9Ch3gRv{v1{uU4gj{Bz8kY0;bY^-aM1a0}zKuk%dF znNLfJe#=*1at~dRFaic6+1-Y=V6zt5weS{qggbE#=aU~5RY=9mMY-;q)pjqaa?M|0 z^ARSF0QJ@qRV*GIi(qtcXawSmaHr>mS+#-{&m(Dp{W zX!t&f_gf&%caQcPRR=>_>69JFU!yj|KL~^0MnP#Uy@pi+mV*cft6gque3P>;pjMJ()t7A3BswA6{VSRpE}LYW_$_aufNHBIsvhhe2yI$i7b5D@K% z%AA4sG-{?RJ!N{O!A`DQb&C70#5gY~c}{C+udHX)myHPnBY0h)-fo`e^Z-(a=uRS= zE^6Is%Mr=tdM}<4caG(&{hr3X2zOPj$2INmZ!ZW)lCE|qk764=WHsI-14s;j3`iWxQ43pIZQs- z(L2P$HaIJ&*%*YAvLo=nsGKJGio$EDsqP_fS0Jar71$4H<`lOBTS!VAA%AN8p_BXx4xQ!Oy#azh^3 z%V8ps{E^GmmWdfGV?R#0Gx(We&6eAOtIgN@=7(cJaUDF5lc1uztE$;bb&R*x8xITA zpw)I43zl#vj()oO$az@V&Uv>AEJ?aIODk=i>tpp;358qvoDenb!Y=G29lQ|sxt|^L zvfpxsJh-E!{7RXGyQ;}RpR(SE2ZoN^0(3q(ZBl0J49GS4M|rg~f*mf{)NNyRxh~e1 zG*9W^%`M7>yGOfTaM5Gm+D^T04)7%Rv3KTGt#718*}NiQAIVZpV12#Hxk)TWmdCMo z!-r+-X_myp{O9S(KHcG(_g5rGTM8XUwD{TF*B=rYt~x>U*buWz;9^rv=2_NZStBhB z-@M3tLVtgUV12Y&bnj#oUv5N=Fqkh8UOHb8o%ArSkbvmWApWvdE5rc z++5}vQ0nLJ^tP-Pb1dke+s#yJi;|s|HV-uhxfs}T^{0;|zyXW$b0)#wZcS7=q#uu5wRIQI9UyXr zbuFby3A+lb1L=~leUIvV5*JRzkw4$L9|KDlsDALEBPz&Gy5}7SSLrCP?eWV)l1Ywl z3RbHx3P04=1+O%IjyqZio3tbLByFQh6V&BMP%Z?T<(WTkA7tPs27YkElik zjhQe%c4F2?*B0KNu#eTl6=s9_k4Zh}d2do)SFj5U;PbCL+~%a8_Nd)`4+$ukVB++e z-?L)bDtwduaL$-)Jwv;cXV33s@wUy}L4L7$y0w>K&!IR$a*g#RuVsDO5k-wRo1@kj zQ`!c*8h9Y&eyfG3WqPIoq%YGjDz#@8XjpctTaE9r1ZGY83K7A$fP79i+iXvF>eX6E zRjnm27tdSk0=+)5dnN3x!bL1*6MDChDv?4?3oWlb@u68qip{}G;6)>##-zjr+djNb zOO8w9_7rvaW;q-TMreMAFYGX}BB$GERl=MFd)0i>40$#4T4~07NXE38mH6oGE*YK9 z)yYG6_gPz8a^AUr;^!tpg=oI%@Vb-{?!(djw{rtc@vW{q&-BEmI$%PKNwLK7Azex1 zTxK18u_N!6OyfAEFa=EY+Shwm>YG~VN82vDYOJ6u8EdbrP%pk%f2is~;p0lcAkAIlZ7(Dcc?}(koK!tae8mtVMwh!dee7zf7ZD zRDU{eD4oE^olMWLws*Oj7cq{L#wK@~`08QNInZL!^Y9F79LbxezU5%th+Z9NS>K$yv)R!cA zT8|cRNWf-Rvzi=gajZ42Rm&y)^iyZ{SISeoYl*h<`j{d=VqczQ+wwZDGAX7@H|V#Pt2;KOd&wsp6F-+^#(NPG$_ zp#E~@y=K|UVC6l77nDj$;>G2}=U{t@_Y!zbOjaVvWXMw=MH?&0-UzuLzO329$l9vp z{`3STP7Gn^c8BuxbAhs=k6u^R@&0TlHZ68HDzRONHjWoikdeDqnNN62BiurD%Ny=O zCu@_f?xQxe_>qhmy#E=(cAu&ExGyQdy)WjtoX?|?>-uc8TT6TOxUyAU$g?a_u@P=( z$=J}$IE^VB(9!ICvZrbTf5vA(H@6U=OL)a)#7mmS7GQs6WE3#d3~QMklGAS=JoB%t zbqIRoWj^}`cNG=}?PV!I!x6I1a?QN*5`T4k0HlDN!cB{%*<8BI>%hKk82neizOM~jr;WKpt~i} zKAug~OW#6?jI~sMEYSwG8h24((;xC&gVQj?`wG;X$rMtn4Bk>PXykQ+1pOF^-?ZtJ zDJXZQMb2J~Lz5N=qv{wRo-t*kMq&|$o)U9uE351oMtel70{<5aL$`eS`Aj0i(aD)8 zf~z6C#`=|gA=X;{M2ZvF_E()kAH^I)CdbXei&p_%6)JSH_@gYBNVxSelF7#Cn;6eE zS`_qz^_eDIXe>-K^~P6|y^QteO3$+(>y`K;u(>6*<~~zSw`s(M{)uXM4KY^cky5t5 zK{yf93UiYs_sLwNxrk+A-6Jr^vZ;c(J>vrk?9h3I1V6?8&f?HxR`bIBc#G+3qfv}_ z;f_0Vd#)lX`IwjI4+MG_6IFk_M+d^Pdb*7?nqQvjN;-?%w*CO$!jHMMKc#M(rh@FG z7QUZ}%6TtoC@{kzYqmEG!`(`I;+TJ6dZ#j~l2kqIc*nrft>2p`<5V0)Vr;VPOHPLb zc7ZUsELX$7eN;c(Mb!fM%VrYUIrG<)73!k+A0usg%)fngF9aq@ABkGl+?e;EM3FIq zkrZ{W@rTOFWYQ?B{XMKkdbsdqE*IVULZiQRw~D9|$n%H`_$oSZp?V_NP>>B?ln5XF=(|v=P{`7ez#R{cCYeQp@( zI|G^6;NTiK4e@Jp)QTj%&JtFwPaAJ(T~oLoj7$@3->^4kLa^0o8lTE0Mh-Rv9yC`h zJUH+eM^;YP3**_UPj1sLsVza8%>$N zKPIEhK;GprwP-?0!lxa)O?E)rYJJEj)@2CiJH}K#&su(k(~>_shbPy%oCeNWINGU(9`|R&3(4 zaWr3OG{c*oY_p4r3fE$U)XsV2>I?@P6(62Vjtof{nv$jSsTbUOD?&19y>%>7Wws&j zGo0GkQ+kZ18Q-&*(7s+4i!1d{13%m%!&?2agO 
[... base85-encoded GIT binary patch data omitted ...]

literal 0
HcmV?d00001

diff --git a/docs/images/fabric-operator-sample-network.png b/docs/images/fabric-operator-sample-network.png
new file mode 100644
index 0000000000000000000000000000000000000000..8744b0b9ae74297feb3939fa83913a48aeaa4851
GIT binary patch
literal 97281
[... base85-encoded GIT binary patch data omitted ...]
zNPW_Hz@U&;n~{pHrQqQ(;?HZVG0gu`7P^DqHucQ3(N9-(CiBmdNC7bAQ`3R44 zKymxd*2U|KFDzmm;LTPi*PWF8tvJtA3Z@XeUS58o`=8}7sG zpN~f&@Woe5yQve^r?<_oB2TNKD{hS|t1NUQwpONzkraA8=ekr1S8du4l1MBXr!11j z_&tXSJf>Q0wJK;LNXI?2PwaGMW~XTHyYsAWWhJF9(L-Ydrd71d+gH6(RSXyESRO4X zel8boY6YO;`fOF5VS?x1ldnNEcNk}{wDmt^d~x~9@ncRRqn_JiE$rSD9nx+}4=A51 za=pZTYgIWIzMzwHQ138Eu6ewi(-4M^JGYWP-f#YWGfFV6Ek$HTcN{IFOGVwV{9=jB zd#e+A%b0$A>*^Tcz26-tA-u6#%XL{}L)l!{KM(eNo<8FL>0@Fd4%phv$co2qne!?e zLH@k1)b$FtW>s-iEG3aQfI=lmZw`BPY9)i8GAY*(r`q4a;GisBt*qKmSY>y@&^1i` zs%EetZ3U|fIRF5B-7Z-rRAl0XIc%=Bu*(db<U@L9bh`u8|rvQZR9~#;=iUf3+lPI;nU6axteO^|CKn zK>OC!94!%ZVy_wlKA)tFjKcH%Z!XT8_gL~)zg_9)i}N>^8eBBA>MZWE>GQ+@Nv4D` zt)Xu(nMAu=J?E+0*^MpAm+-KsiN`(G)1w8;tmvW*w~zOKqj>z0KzddJqP=`~Gyrq@ zW6P;Oppx=CvT|GYMFX)c!QsKdyjGFHBa!oVxD@;?%wB`8Obf8ksI)g(B5-K<%vE=PfB^%1B2Jm|I&TgN;YWdqOvpOGWwkGfx_=26bt$^gWaNcLHtuQ#F;u zR6YG3?66E_B&W2EBEARfv|mmZwttvIEY~+@xhs1cRxITpZAbJE3iY$vHjVSYh^pg1 z`1^FJ|KN%|d+f@^#kCBiHDgpnFEno5;%cvC>Vw`nI>N32`|PN0s+jsdfnKjk&>qp+ zco`W|yWcES$@_HgfZi%VD>tOoQg6rY_GA8+Sr z%cKma+f6uD-~5$>ogIo636E#`isRb2M=)7q6?ILU$Ooz(=an~rfJ#-+cyxkgbU=sU zX5A%WJu&?Eh$5&3M+ng_XnqV|T~Yvs|c!x#JCB^r{c6VcHz zL^wXQJXT1GMbYArmkFF&;Vdzl;j@}50SbHcOQnschnfI74-03eC+!=#Wr6kOcM{DCry%pBYjNa@iV1w^n=T6cYPJf&r2~iKtJgMv=z+R@(Vkl*=elxA$>?XHOTcaWuEG z(=vGp5~JYS#ChK*%l7e_mjk9J149hC%4N2e>OHUDD-Wq`1_;}{eG5mgjXHoX)UH#V zp6E37ZdPpHgV3-yxGcF}Jt=0@JuavL*oo>)X=u29Q+c^WTRmkOr5ehsgQP4IMq^4} zADdNch_=c}ochh$?bll@8y-T>dCYQKj=!R~$d83vHt5ZWw z)ajV7UVVgR=ZNM7EU={8P7fGqyStCK60<{whAGIn>YE>^}0xi8eh^q?Xc}?YaDOYDKu8y&`V_fal53rOL zW4lm2cIU6DAe?qmoL?&nD?DlVeWq(|xIDIWnAG!MwOky}h_rWO=U=9WN^wvifLwR- zDxa;}dGDZ!t*C=khjCEjCcVPreLfrg_5zJpB|E#OqY~6>sFC(p-L(Q-JAK)9zDA^_ zwD99?rC6?SRcgR{6j>qD?($uUw~Ub<3d_O*>FuXXgP&-YXhiGP6falle>D zEW;-(d+HU+xWm~xMm?AQa$3|}hCK;UCx}Yn@j`C49kZ>=74v8qS}n!dRp!^;Pb(U1 zy&|nUKmj0zLeNvez~Gg2U}ky={b<`xw6zQEP*_m61EEJ__KkwvqU%&&Okegypx&u$})cUFA6 zcxEIH%5C_v*vG8UBL?~xCCL){iq_-Qcy>cb3ux_+ z`X^yZU?fd1JBJznyDZ2Pi^q9#aXJY6_IbMr5vjw31t3r46vWpWDY$55Jdt%dELq zy74pj@;a%we@$E(G{5GM1K>o=3o~6WuTGW-AU}D{I{N&b)B78P2Ok+IB0gNCNB1Sp zSIJ@*^xTOD@~-mmfea;2;ANR*fJjj}LP)_kV+$0#^U~VS_g`MKn@IWW869To`>PjP zP1i^AxLj8ir%O|dU_#mm<;#GAH@K&k;vd(7Gwo)$ONk`sR z!PDvHF*f`jKw_PMS6`}aVDwDd<>~3ZJB=f+B`_G~Mu6I_By6}`x8{?;IZz<*P@`Js zm8D%Dss!f7AQyO;$$*vF`xE8%FU5#fxqh4I*-~XN!CfBf=o@5wed$|370m-V01FFi z1SlZZ0RAd74(N-a7mX_5{E2RxUnABhpol5h3!QUb>_BQ<®tA8rmd(ub2DYSJsE znwJ+LiSF(%TwQ7#K~F6hWa0A_oh>s??S_f0x?kt*69*qG-8cm*&5D5sDOQ;T84H7k zj;%*pR!6CJatns%8UulLo(3C_71 zP=&6)eP}oQ=J{x`W^d!C#m=WvC%(iEG7D|~{fzD3zk4;F@As~}Aajg{xNjsE1I&fj zZ=bpye}g>S3&vr04u)!$Xe9t;Y2AaVQ9ez#<;B8BIejib@mxtCs7r6X&+|FjuuQ9e zbn@@-bX>dtmTMB~%RBBkiYDjZzwHCFO+By~Qjuw2N%Y}vC!XMWEqZYn*O$uMo4Y_R z;`i2-*>HR`-D6lI8!#>{x?_ZIfTHqk+(Vl{=P&P`kJQ;|*Ej)escQQvE(FIxc^8lo z9%Q|Fm!l*1+vs6N!1=e3S7iLk%-ZFw04;$JQMa3lJxV!NNah}&?FgG_J(;)L`1VWg z;tw$Qn{+Q$fPzE=@(Q(0kQbncvRQ9<#;V_QAV@b!8@M+KkVy(I0J=bq+wS~asncAO z=4xNc(iRy|q}?jlHTd|){{jFw2c_-%JuLKW1+6t2MQU8;-T1zmJcU+C04K|eCkvgU z4q$*hYt?6OJyg3rbdXRzTJ+%;=l1t;IbUm?GAK&ofW6Xsm}a;mEwY+rD?6)R8NO7b z?$D%<$htc3(lrDMSvBWCg+A2+cfZbVLK)CVA*0k6>aYOYsY=tQ-V*_Qj+6=S zp1bpu;-bxJpJVB+qs?RH-Jimf|6uEh1p#<|ZR!CDawDd7lT!0FWAb_8Jgn`7?`TG= z(gGQe^LnZ>DSgMU(p`V1GG zotV7#)?^3U;g?KlaiFq9j_894s>n@GnN*7K-#nR^DIiWPH#7%Q%EW*ozWGW|g5?z; z+WqO?ymsS#^-$Ao=ctz6?rvxH8i0^ur7Lq`q0N&$32>FHPDHQHmsq<#ep`9_v=dlr z)Vhw$OzaA zG`i2;=eLGLU7Fn1X#f&h++4F~T5T2tZtUjfR^LS>;`d<2|8y=UK7N^egL29fpc`}p z)Wm*k(Gy0I0vA3U&0>uw$6K?FFkQ=k(sSTdqCqr7%X#npGzA|tdmJ%uO`PQ}( zPQ+rqGu!wkCG#+3214`-;d^iQ46V^og}^Ke<~uh(@Ow3d0pQW>N}>sf3?c(Ky>`tY z0Ot;CG@uzhW>LcQ*KVlG8+co5vlM}yyG|aovzVbiWhZ_GI@DYp%-Xdd_eN}8)7oI{ 
z7h{@_AHUHkRPCHkSZYR)w_jcQM!MI`c&`oJV=`=V^Jt#>B~%mG58!n7^7UfYIoj7;=+phZSD0+79(+(yh)d;)9-haibgVisqVb0VDt+5ic$C-cT>z8M3kfY?I|suq8{* zwc2zt0xtFLt{!HsuDUJo8lc)?6f4^`ccRp&bvnliDCu)=k}fuHmNzSA?*dG`IF^bQ zHGrD5^@zWj2=^`?V9j1J8`RsUoZdXCn=oPlGG{y?2Cn_QZyc%=0Q;TcPVv03{ajvF zmLo_;cmi;~ec&GUw~NTv{X>Y)&NV|yK$>`V zf)e6Zn6m5*P~=vlaH7IX|3ldU+q0}IRuJ9zHJs&xV9w}m6`^|+f*ySbrw^#z&<=fD zfa-WJ;1OBi>8_DoCP72nyu!ds3#x?FV^)mwW7O*~fF|LD>;i7t3r?B+4hR6TQnN`x za|=L`vH)nz^lSO+q_a$9ZjvT|4%eXKv^JL?9Zm>byDjg=6T6{pfOF~L6bl%Ul-eBr zIk&hqsuxaer}^IN)*=_0$@zEdUY=Ay0kV?F=|)L~IpCM7J+}3(xu;vj1I_hQhD|`C zP#W7dRRtG&I!IjU(QuQyou_8v&$Z}37zX={&RrCq-2m%KIC6w4A?_>e=`$h)vGy$2 zD9SOa9}X_agb?7PbIjLK+tuX5UcKt`8{KkC_8{ij%kKoUk+Q;u9X-}Zi{j*v^Z+8D zZ`@j=8v-DnNy*IFghfVr9B&4XSIOM6o$l|V3Yf*B-?#)iQveWCzre;i(k#_^1U+3LzNT9$f`(hw;UK@$l_aUBDLZe1 zI!+b>jAva*H!jg}Ngs^4?7K2o{gByTel6bH16HdC@Zv)C>+xVJ|HGJrxtK0xt=}vf zCQ=Y4i@&Kf<{_;D@#WbjxTXvsGU^^zyaLskS( zTu!r}Z_xz=a253%4}OE^uF-XvfVJ9qbJTcw4eaKYb*Hs0|JGQb39vyKftiTA#=Smx zN;t(66Rw&SMuyB9g^MM{U3|jI>OlbDYkxDCT>;8LfHE3X)l+Q)AE9IbD8zl251^)n zPMvydSmq6(@fkinOivV+??-XE8 zu#Rpzy8tiCi3T0!E+`h{6wlv) zuE>JQ`}4fON+ZJ4mGuASor%(wKCYG)i}K zXui2k^m^~_d&hXcF~0kU1Guoy-fOQl*E8pQo@Z@%7ZsNpE0y(>_;`&9FEpuSh}xRF zwb7SR{3t5@n5dL!#wvn&;?5A?asVVH9l#p5+OaaSbRR%P^d2gzzMK&HXmM2Q^>I#B zHwKsFFR)KkdDrfw7cp|9eAyORP)t@iCWxEpU;|A@pmx5U3`htDG8+|md*YX!={qD= zW85aw^~!hGql4YVZgkV z#0gI{mox+6svkk!*^b;sB`E+tS*7DvO3R?NMMK`t@)f->oi}uH2D)L|A z@(ePZhOqYiPXxMoPV?a0%;o^Dv85Kwo)34cdP$QS?lj7thHp*Fm;OlMO>|o(SJBcn z8a^YXfE^lkqN{~h-`$N9&vSc6E)YbWP`thv+Ud~uWESROM{|{Ps!k!IJh0AQ7P^R^ zmRhuuXtezzxnAxggW|m5s7L)NIo-1zir(V&8wz2kF0A0DxJUcxeG`Th=Y?s06r3d;PqD$sOy z=(hi=6@}^f5(>mtEK$FqLV_Tqx2C_bwoISWfDR&xvpOndqA2yTO&=E6;x(@+A^s=Y ze8=>ZmHl)v9OR=2OeGi|@9UbPy}vRNA0d~gOqcM&Sk{_j6X(9k2K009T1H>`B+M~+ z+r(5FUV|VsmPj~*qg^upMqBzh0Jn!lo#uI5Gjlqo+&7lv3yr!9y_=1L=jn}l>(uMf zDK&+!0gWOt_$>at+-J9jQ;DQ)^>pm?!7|~p-7)RFCd1xT#`f#FH$a@zkYmwvV=A^~ z6^2;;ic*9ox#U;ofhJ>vPVc{Y6OdfQKrV#tcHeaM9@K?h(10MU^y9Wm#_+`1r^W5( z_%%d-Q8irBHd)a%<5MV%}Q*@U9%S__++ z61&ggdmysQl4H<)OL=o*OJ#ctawfo*yuCv<@G^gL)+i@#5gZvZOS93J5x#!oQx!!~ zH6&T9&-4$;zMyg{?@>$h#AGIRa$O}};TeLD3d^f<;}Zk~%sY*4*$Qo+1$?vkA_GEe zv1b(RV=M3bnT>LzBxvlWV&i=SbwmO(*_0$7!siRKx%^Yw^#KT^wI|#obxddb%iFLD z+tZ6f-l|I7HW_Wyv?%1Sd3-j;xGxY5RhR1ym!X;je3At#=W|=gfd7*9t%5K!@-U#=Nsic;2$NCg8AaK7v6oew58x+i^ASNxOJavdev6z=sA)tJ zHbH%mY<*GSz8oz{&YM`>Geu-8CV-8u)m~d}SM}tYWDJjc56CHRmCxX^=k;#uvJo#S zddIVfK}55AE`sB0DNY&loKNYTtJ1emCd!&0pwEpvT@GvZfpCgJ zsTZC8;&@xPyx-gLsk`I{L~gwV<{G7ZPlyWp_-JY14%+#b2Y0c~##;HP6P7{Dt zBr`W)#ZBJ$n#7;l(52@5E&Bb+>@kc~OAum3Z@IO*VA*PVB74e5C!tPoOycg28Il!a zO`urGquD?t5N$kFFy~d8_wq3af?BXGDYr-$ zFax4!^1fhDUtdpux_De6%J@=*;~ z=d$m#l**QG+VuzoNeQKHbTdQRd?h1XLbS?Ki36&Wg*YAWF5rqUT&gvXHK-0@wM}%k zxn^fum?I8R>9Vq3-k3xDp{_#boKorY`Q{fETZ;@7rU<*}>G(&&C6uI;6@pK&#IwFa zXvJkr0JzCW4}2eAG~sT$R77Op15O*L-|;*_vPTWgkOFT4In0^V(y|1!>6$36)8EwJ z#s-^jw7t%Cbum}qfqmU1$jWe^rc7gDS<#2Krs__mg?BxSBVC}s6Qr#@H8D9K)xB!d z<*XyoWuTZ9RcXgwvC-aIHB!jaZryU(&`}G?dIX|5`Loy%1B7WQ#)^pZP2UN9m}#m{ z1=egyI>?3ERju99ZnzLaFdsj5y!tR zu(x~a(B&R@_NRyo=e6#3HHtT-W>nkgIS|t+XiYFkX z2|_qHE0xw3t6S_AWqcIa$`%&eAKJ^$3~P7d3CTYJkU@RvM)Ky!7{3qnmK3KN3HqHP z7jk$yqE9PfiIp3jOs1XQ-`|z)x@zaW8Wk;u5L6s z{{W+m{lv!4f%PQ@wd>U&dGi;SkJM!{f?rkqXmb=Kx&HAHZW@r3^s6?P z+!yW9eXyG)Y$EsAu%&**)AAIJ=My-J`QbYN?y;$xRRGF+UfCaQ9tjhXq>s)X6|7ptMZzP^XAUdU!Udh|RZ zBds_50x;C4tgKIW%rvnUip0lDv(u>n*U9!Sv7L2T2<_fdd+sGU`_$x00I#zBsgAGu ztyzvM?g5`7jxCGsEdAO*=vPF0GJ!^5fy8elxZ81vE|}-)ty+JRE3uFi)2~{EzMBB~ z2eo`oP{avn*=X&AGC`OW9keTjPmN%U`0_U#!;ain-=0z2oh|f*H>X$kYBpPbhs{(W zE`&OAavk&^)~V^B7r4p=(Gw1a$op0O@(F%e`*=k*M{=ERF-#br0Y;jU8#T!LeudNb 
zS}aSt>dk7FXMt|Y`;fid29J&u2g(M0(x+WkmB)}gJfb@FVoFHq;_>Sr9g_3a5f}-p zw1%GtbXbKhV<|Zm0zEE$-Ti6Fm@vR`jyLiFQH!+L#)1rozC&znT7q=og8QAep4kD> zq16^gChG$WmxCr|UZ?4BbdPA+HZdUY@fBEJwCyhVwG-V=@sPn8d#}iL9Hv=`6-9oh z>>m2=0!>uxC6|1cgVl$=!ZbqmI;bN%4nMyqq7eM_sovnKFbJGW?X+ho;^Y9!y8%+> zTo79w=dHJ16_N_5XuDAt*?`E3VC-}PXpxQw00YOztWCQf05-i6kmoJ7@&YQTu1;xt>-?+$*xpiDmC6cqy z+SG6b#p5c4ja&wU4F^9d*n#Jb11Y%5q(W(SP=Q|5iLYStNa4A!xnQIRK}hQ;lXghw z5y*M>)zJu&qcF!J4_hg=~hD zaDDud(X<5+f*U}KO(X7BK=T<_cGx>;(TRA{#^x$4`UwdvPDMeGMfRpOl$9F_9IYKe z<)#1eF^vI1B$3EMSYy0mAQJZZ&czZ1W!54Ud+Q>!?ve2grvs+F4#RcNS+|L}EZwZX z5I9MlOxP6;Fy;e+-s<862Z%WNTXk}+Vja6F=tYh|JvYOVXb1|GmPrK>mflz>W1&Bh zp1ot@y`avsGDb|kwuj>)r)s`TF(n$>?Bkv5v7pEQ!eK2)@KA@5Rp{j~cg3Dvr^->j zX@(USutQTu=`jrLoW3lk*#5nijxe?N;!(|KhzZABN~Y%M;lC6jFN{!2efHJ8!HaO2 zeL#8}0iGjMh&4m6tfzxr{kAY7C2&zNvN#bvspB$p&akUc2}(M=XE?#ys<16pAFEgs zE^dz0Zd1n})`*@ms*4=A3oVI=Tk3@ByowaQs~}yUYUbYYDWT2|l;dSj%R9}w@`TPk zt~kN2)(hB4+phXVH4ejKE^0mJ@i&}K-5>-s%*{5(akU#vJ&o?Ry#zbi^lp;2UGAT4 z4{Xs_qEC|;Rn%S^@UF&Qiydq1{knw89^S!=Vpi;0`&soT3n9YxaZ9rvs!;}96aUa_TNPK(keGU|)jzWTiIlaprcWe}J-0oy)<`pZhPMv;r7VLnk#`dFw+ zd6rMabq=U{g`MsQQeoI0UF>8z5lwVq!Er`yzI$Q3jkogUEfYl~-*|J2kQJNkJSjOv zW`cHr?3wl03o-j|=_ln?a`xJr79`IG5CJC$0juw2t%{dCZxCMrmm)T)D^uNtetv#T zE2S@1cQ*Jp(!Ts^prRCnqPbX66rFDvk{Qt=u)O=iR~M?9mv8xoI!Orp(UR@vhGI(C zMnx4us(G?FVQEcpTlz%ER~Q$U@%YJ;>XSDIlDtDERdi`@Bx&WQ1FLE^(I9)IHBN}T zJGFS@C>Tail7BQls#F-Komm45^m^REhHWuKWiET@ShB$ipx)ymSN4DwX~U@zN7&OiHwYft(6X$%sXZbhp4+`tS(_%b zqMRBS9&TP_UCur|P`Pw=b<9;^okAMFAmof%1Xbatko`4<+Q+L0+1lwB>Uv*^F{~7f zWkA7Ebgp6OHj)tNu_5kM>)wytj*P9jxlkI|uObZ$$335Nt>c4*Ro(H*UKAIMUSL2r zI59l(jaar#RvPv(-UAHAgxgZMmkvK<*j%ulUU<0oum|V|gFGT?2t&e66rv+Yt48dZ zq7Hr|N`6OngzS`RK5S$$)+767S2*s2<3k9k$@)5rB;x`m8=}?~Qlk(>WmlpW(dYaK zroKuRlo>HI!Q6GeE6_Jx;Y2d`DlY|#^vA1XvYRg&qEdx!{&f@LM~NQW|yWz>u!M8f#h z;F5%tn!`-!y5!vG=0=KdF~RDY;f;npKcFe>YG+gtSH6{jf};6KC)XK$*ZX20Y{)Q7 zly$X~@^i-VySQ%2T9G1ufZ4Y9CNUoV>tMcrI@-LIV?6P6HA%N_?1lI-K=98y)mH8z zsn`|wyU@*z^+|m{^0!XHw*<5-0xeCv@iay*_=G`BgjZ8q@#|WKJyKwY_a%foJES*QbP)TSJR=%HW{ah7yu8fGbFa&NI2lAM6Uy;;b|9MUDmUUVkm^}%=;5mElTrl@6 z&GL}bcF1QioG80GMSIHm%D3-`w!F3PiTk%vz-*%?fgQ2E%LNgFP1RelWV=z5 zk-D(86xe6{t<(5z07^D9?AfL-s2TQ^GL(h;P@=Aqg^uSNkue!{`Q$anP98|Hah}i| zQ(nEOd1KSOIs0Ogt#gWN)ERDxX$1J5r9p~k&TBq9Mko8-B49idq%x)pRB8EAu|h0TcyV+2Utn& z)5>zb9u-iVEc zh}+vx4-WP2STgn|U19n7x>5FD-T)&#;coDWBvK9B0OMDpZG6Vtc=-97b-pO-%0Nb_ ziy|i{kBV?!uNq5ZC10|g>c+;3_L;sIA^(?K@=<~u^5DA^-p$3yt}(K0qwG`!a283f6y?O+_VUr2jH z1s|Vw+cdH4=y8I<$nc*$>WTq zH*L5qF}TE&V@-=k5OpLnW0^15Rd}J0o@)Hjmq;c4f4!Oa;81Yxalox_QQ^KBChNna zSR38vkB9kXob>j+w=*;n{p%l-GS&&=tw`_t9!?_ZP_*+Mg{d#-?5#jc^ZN|>_U%~- z=o9cYB|vKV+q(JVTMQ3)tdQ{@`LLCa_LtG5B!>b{y;3|BCH`U=^6O`B5u#XCB=x<& zTw_c4LGvLaA%PY8_xSit%bEecV?3V9|K-tzJbgVS6dcKIN557+{{03StB^Xyj`Nn^ z#_0Vcw1J72UW`Ghto<pXYxKC1VYwtsj=RHH!Y%Z*9Z8hX)Vb-zUfY-vjwY3ueRr&71S< zD{!~@Vb;*j{>o-dSUE3M$!pa8;d~jFks*60?1X=tnC7HBeYMAoN59oL89x#vnsCJA zWuN${A#eTHXOPxH226Ny{@{NPm>0~b5A|1m+vooreAv(5BaUvbuPfI6^}~KnoiEZb zUyx%T;KNFUwG?|LnUtjZ&t8GJZ0U7K6&zED)S0YyX9zoGNwpaB`N2xYn ztY~^ZBx7J=w`0+R^{@vznbZZmu})ZYi{!T*Ynh}~-E?$?X$rUTF{&Pak1Yy=0U7#ew(5X-;d;9$gQM+QtJHM`8R!(ne{ z=j40})f{OsT?r}lqWU$Q-@YP1b~WnHTQ>{RqX=Ot7`zPtXV6`|()rr*g#BdaJDt{c zG80o%ePGZORf5NhbdO~W1td;PNGlO?jCM7f(I%Of1+x{>YOObN?@`eCxv%vxWXIDc zYvw5MEyc5Qb1NzcjW92Fd4@l?!4lrFTvagHEv@u-@$HEdH2j z1&8X&SEL3HS@7Mt`2EFr1QHCUI8}tt2$;?92w0hKD5={^z;uM?#wQ-&D@r7p><6wA z@P~`kTC)e4up_vvRM+YvK0(1Y3cYhqi(Pc~oriKdtsbR1La8bWVFMT8%nx#Ey;gz- ztPoI%XRao#w&WDCfK^Vq5l>CbuEUa_FyLjJtDaSWH~~WIn!@n`5s83B_rY1MD}x9L zA>4V=w^2d(*fjOjAM+rJvre#LL1pXfA#p%Q78G=H568>O4fgl*ty+UK&b_Q=q175@ 
za?W!?!|}GUF)=|$Cuq#r!X2Wf3J(zDy6ht$J776796t#q-mD4iL>)~)=t;9#{gI%N zakT%+{K#Q^Vv0=n^z?MO>~#$lmBX!C)=v=nl-u%3TxMny_i!sTe%Oy3@d0DU6`J4! z#YMvHOxpgqqDP9DexTgl!g0h=-RlU_#B6(VPA?)p$5GC z!_Y>2HDRk|6BogX9f?1E+8cH^Ms`(2COR^}(GE*EQJ#L2Bj!fnmtfdo*FE8cWHp3jBwA-el4V0cf<3y!{s zK8u@iUYjXB=V@8vx)Np}WT4^2YupW#ZTriaM!pu35k#l({WA_~PpaXF_+$dLoAuEP zQn>xFiZ+kX@-)X_&UQK9gyQL-mCipV3c#fbeDuPPcL1BF!|4*5HDin3381VZtQqIM zpTP4`|I7OaR#nGKr40|gb>wn&naaR6E7=k zH_@)f=MK&N!E~QgAuy5%R|mPTkF1|0ApZ%h#N61XqAPM9n9PJw4vz?937 za1`DN!v@!R^ZbdEg5WXeDtk`HX(SfL#TrXAz$YCeliC^?LKH;rwsbY&<;#ic0YD>vs!PV{zX{3(07FEAsnrO2D+Ootr9wMZ$2GsnT=p2 zXAtW=xT1r_6c1L+Xy2e)n++_UC*0uE#m}H6uw~U&o{tMP_k*siP{u@GL4k-P6!-gq za7890wRI`N96^^7Xy8&Cy_(;2eV;Luo+!~gYH1vo>-ARyd~ohV`%xtw9rGz-!%5yZ zymK2jQ}cQvxKwEN&o{2xoa_OwOU|Ki=a;EL;0e3zwNOIiFxnXJO-Itm!h-MV29t;1 z9+*zUSCycb-|t?VXeSZZKxqztL?JyF{{0_W6yk}sVzfc&)T5C z;^Vg^&Ju;{kAObXY@ZkzSweAhHiC+~AX$nbW0K!Xd zr<*O&3I0ByB0Q_)%IDOpQ>+bq_H3Dug5Ud(l#Y@f&tUCNsqc3WlKaA#Xri#)_O4M9 zkZWq7150hrj?~ z@r54pmN&rLobZS(Z;X|(m5kO&lJx=Cw#HfDqqfIar(*}(2X(}kdhyx0xE8S+ZMAL& z+y_Q0jozI${WxCE7BN5O$z|yD`OdzT<}WsNv7n`tE`EgS9{wbxVrG^yl~S|O80^Ms z>?$Oogr5g3IRD~}h1A6x#Vt+OD|&x(`iGvy0^~Fr79XB%2_f%eJ$RS=Lqkoc+TwWM zoQZ|tfBxdd0N3-W!GhLtQzys=hc_(1nJ;~AW2|_iGMqdQ2=wdsq26(N8REy01OE$h$U19SOHzAmoYx_wz8bGD?-O*_#B`leB=t)3P{(o{b3mXXW1SVvMaZ#m9GdUulepNl$O!lEp=n?N|vu?>6fnaepK8 zhyfYgStDzu<0@E6T!z|W#yx?6*TnLVg z$W_-RT@mCz1xM^`*~Z=?TifS_7``n(yQTo!hM-oTyife|=WoJu^Ea^0`S$~q1j*elPh-;%PGK?`J};Jy8L z!Np1uc-O8&M~@!uhejW+uvK(TF2Xf<;?#?TgqaShw5$w*(4@qy>v5-Lv2-*?hvW5y zD@hNA4vwCb4GWp0VwK~$=XR01aNcitCsewAZNtE_5XHo~3+Ah{@oz0W00OC~tXxMa z_pG$!#DK2ikY*q>DActq@3<-i`gQ`9cuHufA*iS-m4a_tPx;EtavFyQQ+lW2=RO(l z=fkYI1v5Hd44Q7|lrmf;d}- zxdhgt!zTH9Hn_a@h|(UR6LENjfStvnhYF8+)+g<)fTwe(Q}$JIvK->c%E5N?h6@{t zqNywf{J9@ax*Ijjk2DToi0NPTTvSslN7}T8VU~9bclP&}`&a81Y-#5s1(35yVtM-t z``%yUfw@F^lju!mq5En_PYjrf-!oeQFx8(g>8a3H>WwS3ryuVr?&-Fu=^sh=uj${R z`~>)17K_-qJm_^UR}VK95@E|Pd)4$Jz5Th&p0v64k}H9ePYfF@OX=7^Zz5nJJg=du z+HYTMovTtzmEu5k%Ak0qIn`xC#9=xTkrGE?6y@`9JwV$zV8yN!?@1(M>;3UvERcW8 zA)XK9S3!OBM2*OtT<+()2l^pi^It}oOC930XPXDE^78V!4xRc5vcA1%lGJUou6JAg z8@9am`|Nl_8*X_A^-O`qcAET1Fdz7nS?`}0f>==>tjkJB)E@RG9`RV#+B=_j^YaA>I=w#s~Np zE;S-1pAMAMx=7r>3v9PgPt}>oNsT6ww7sbJB1ZP(=*ANauXGRaNQK3lMEkccG?<7s zNJ*%aoc$NmX&WYJkCEwgsf`Upi@{fI!PfxA6t4x`*|CGm_)1yTZQ1kZ{lJ%csltZ- zMo*D8LS5Ev0dx9M;TBJz;4fFmC@4^bkPjD}dZnPfw<*|WQ^en9QR{UB5Yg5acQ*}i zcZ6#28qgl(m%&}L@d_-rkQW@(IMx^1vldr-IgqHia4n=m>cUjx@n#yRvnJ2wAyilD z%0KcHbP(ZN9CsU!z~e7l<`$YU9Yx#UP&g;IeMeRBno=qVj%M_yKsaT8gMv?KzGrj2 zXE0@*_EM3xW6k`$4I(S;Sr~j05Y4;5w=i5U!IeLL!ny(w$ty{)UDvp1UG6vmgfh>? 
z(8|inxvo552=`d_z5!f(){r*)q;aq87_SVj%ha4TJXg|?5Xd)J<;!h+a~h>pf|@){ zkHrh!$3LaT?O$C$_+ZYd1E7Lv3*2|=WD!*hv~9`y!K`o(or?d<;ti)@5yy1LjxR}G z53ETq51oiP)?Fs^TfE$cISGYYQ(hW@E%2C<0ae2owD=;nSMiEB_DyJ8hGFH#`l@T7 zQKWPrhV$65g@KXN0CrXuHb6jiIqdX%#6_OlZHCOb_>@W=aNnq@KUoJ@L*5W7D^Vnr zN`Aw;yI0F^4ubtYtl6+!dl69PDt1X7JYoqNyEU<(2mi#2)lM;1bVDGVfkM;7mm#OnYJYRf>8pG-nJt=b%V*eWDOpzz|x!8 z7CA|X(9$i5k`aZnK<7;fr32Uj0-{$aZabX=pXduO;)wZpXKXHpaS=K}eda;UO)U^k z7TP3S4sW7a+mX!H{ir~r9*xq zr2hsYLHr;nQLh!93eX0T3-@pqme9D@Si!*y%qB@AK%-dXtb6!y`)c8wc;0-%%I0ED z@q%Vv#`EW=5y`JJO&u|VixQM-&O0B>N=6`QIPi3Jk0aaE#O2VU9TZ#F10aUCh(mUA z81<}@#jqR7l`ErfRr#=t#$Kr1xkY@rYn8{5V^ zX47-Vzx{*(tsGya^=S9qn#^k-whw6vmyHJQnjQ?g?v-XZFApV=Ub(*gL9v%?m+OEg za%`Bky3TO?`91pWj$U$qESqVPS51Zv2iXzA$oCxz6GV07=+W7%<=a#W({wq5Xv3$+ zv|jTVJ&@KuISZc2S%H3Ga59NeoubN>Wm=`AmoKwxv9614cD=HOE&BTUZH|NDgoK1v zAP8#XEuEd4lh2+~3#8)bowl;F8mDn8^2Z~_v?>Jg9O~Wf3-HqUV-(TQ85Z(5XF&5ymCa!HkGZxmU&O(d0)f}Bt_JmC>RLP92Z@(k&NWsvv==Gceuls2St5K#5FAZ z)u)2SY$Wwx#7tbUCU3w(ez+(nh8=lP8FF{~iZKmsr0UMDImHf!AUINn4e@c-mNYOh z=r#Vci+KA&q<>a&<#V0@sS7{yM>f>lMNyXKtY;^nv{o)x?dAW~I`iiCrsn2yIN0Kx z=W$v-Q;P1Tuj2AK5f!UyIR?5cskIz`J{=v%}9&Vb%yNi?7tna1dvkE zqa=cX_(vJ21nSPbyZ?k=-=Zhw0hD~`x%ZOJ#w_2nhu6$Jj21Dfmw7j&x5k!VEi3qw zs-;IyETgvOSzBW9&)>{EQkVN7BkX^4qF}E@T2@r=&K};9sQdw;zJ6q7WR$P z_gfn9V?rZSn~@dif4t|Zm;O1=f6e%&Q3Gth^)QMsG7rTC5nsN$-KP!1&%}l?*-hbn-<3TBa!ctsjH+PZ%{QC1*{`Qr$ z5?;WHTpcF9lt?c{TANeg?s$8pH1VNvx&zTYEGcwh()T?!`_IoS=C?1 z$o+VBco6je=hNly_xk^Siomp=uoTA=Xx z;s0@Mm~*nOtivUZ|8>S0G>4bqUiazt$^6(u{$*Y4R)vs!i$bL9_o~dFH+i2MMtp`p zd>#?A{cUA^`x)bDX!W!FAKx5FDB4*6`9BIPKW_ON!vDDOm@4JlMpR;piS6k2B|M<( z2NS<_Ebh$TZ^{EMwN$~?jql^RWVW!Znqx~I2V!P`<5FIJuc<#GglLGp0aNB-8nMhBjCZPSk&ttEmT%EL5Cb#y>lYs7~yg--I3Ud zoZ0c5uE)Y}IefxHm?en&UvG?u>P*w{>@JX$Wz^`|tRGDcUD&;3trD8|YdpW`pbZzR z9VZSuPR|6cuY`oPoJ-GJ^rIeY-jJzIuQjxtnE2WYE}_HZ;jfNAG%;P2(VlRt$Ei8n zpI1AVri%}!(}pmFnbs1`3zr`txTb~1cUyU+D-TEhwkST-GvF)w=+J6ZbdPX!?7(=7 zT62@kIsGT9txfGywu3#No_A>lujgEo$fdr_yUsT=-mzn?ln~E#acp#_hB&!ft(z{# zuxJhRisf83^AqH?B}>S9Hsg-XY@F}q?v@JLvWvcEoi^n7G_+$W_=;{Ty+m>(1D;qb z(XEG+!;v0DcsTX(r-k_^S5O#NC)>bs-2?3Qn=8(AoY;$PP1w1y;eFFyL`~#R_;LRnMliKHFw|l*DRm@RurPM2j(xF8OqZ1y znkD{lQafkcMlKye6UBU50G?=%25LTJDuT*?{dqmf3y(?T?1qq;7~@a7L?_fYJkflF zHhaq!>72)VBWhM2HKj+?#FCRboV9+R&_ObsQS8`ZzG}uC;i|&WvnhE?J(cA3!ur&+ zjl5DV62)a z6Fx?0hc3a5YdUXWr?bjl?tY7NCnkERd6>0#b;sH1Rq<<2a7J&iqU~HEcYehUk9b19 zVS&EV;v7#m+UWy4PI)^H@?s++;-#K^JXGg~*A=QF-sgH(Vb6NizCD2z`kS^nS@A<_ zi~B0DBxg@wdAE2z|6)e!W$F<0Gm_iXbu0o4xygb`QytDH`tM9sH5ux9x@izsI%iY) zb}%vZIxnUSHIC=gK6tpa;eGq4{#}U@9L@bYocpQv{igbqOOLX4MHl`GX&3g@>CT(h zDf$y_BUw7_ofued?*sV>nSEDI0gi-KaDsC2bW80e-en*XnEf zRl5_ggSng?&Wz(-gKay7*$MAnC=pq`lb`tZQlk3>7os&$hoNJ5b2Rtg3sKH#m zTH{lidKG-Ow=-^3oQaG^v6nxQ?Pxh@G;DOCy1Uje@-nT01G>aTAatk+U;g0;D-6uY z=jiXJRh?D1B0s~XtgXr3l@E4cP2**1XUj?6`*`pItHRm9_Q^Q?!Q*EoRCY;Af@!Hxf+8a5IJ0;91%{lo%WYDxK|%fySuS!{_yb7fNzu;$!m^C_ia4$!xkOS zOs2i{#k&^b!Oa5peJY=mcn_CG=?YIrI^u<|51O6w-;Z)3dMjK5{KGMsE%htQBd>-t z_^sw9+_Rh|>$oY1gSZaWoyOB%Q7Jyaw>~zo^X#UQ$M4P1e^NUc4G6wZ<&F-+VbuWK z<6z(D=g95xZrXrh9Ls>!=5=nF&t6@VCq3GKg=FL_S&tG1Z@ewraSgko8hAdx-3)V8}W4sOwZca7(s*rso)UzoYJNoJ7k)ukkq+Mwmlv{@!c8(E zsZh$EI(^JS{`GtOV^eV*8}02KuPJ^*C&RA*15r>Z{|Xgok?lckZlWtsyh|tR+29So zLiap>!h1(tCtVUifY(hk>R+-*1mz-h;Xaw~paSEAI_#|Y6dKl?!3k}LYW*LeMaY6^ zgz~f)GS=wG=}l@-5clf6Q^NnxuT<1ZP*$NQY zckM2#Fka)jnp;bPr^2~qUXm;!7_qPS7l9njL`FUN$&9-^p6If+GusJ5S5UR_kOr}M zUY_KYi=_T7!9NDC#MJkm|Bq1wnmaSkP?q=jJoeGUMd$SpmgiAD5uHui(!|h!JETgE4t34LKgdb z1Cgm}sgM0nw90r3#h!54Zloi4X5VT3hritb@Lfx{meEVpQIEtlO&RnD__!*GcfbHs zQec|qx$KUQ&+Ma+uh@EROaM{>iFqgZ+i3iHdq2Lqb{0_n;~mu1zp7sM3(yyiJFlJ7 
z38tl#7QxFXjac@Q5y5l%?N~K)c%#;`Ax>LY@Q-V5`PuzQSdb)Nl=-&KhOKb_`%3-4 zemUlRwOtF}KL~w;^kDNQb^aSz!)eVoHGY$B{g_mLmLm=-L`vDc`ONiM1lU_{MxhrnslR`A?M6e_gdXD z%3ELjk9(N*fxy0#oJjtE)wT+e+7?GCoZg9i-uv8=cHnpXTp)j3`aTk*(~c`-ey=UI z5@rXaq@mm+q}9w~$87(lTuAaCx#WErOhFi5epfTU?eR7@TDqQZSi=Pu(0!=)rN^>X zaBXmYmdXLtF|wec29fwc9-MlA5-u zeY|Gt$8EN!Rt?;OZ@A?itVw4l!+34~dOf;h+)e8)oA;Ct)pDSG*EVqH8MylbYGSZ<&T{{YJQ_TzwX&t7=&maLiHehN&dya1*zI{M4_tu~k-$O0 zCujLxHl@xyJMoT%vV3auX4dh5ibK=;WTVf4@bb^)f}tP@YGdBBnizXcSDyI%mx}~% zIU%m}P&bjslu9*$hq*6sDPXSyuJ)ZzXc4snN|d?Bwe z=!sGjz!V#mt8o8w=Juw0Y$!X9*O}X;=nt&MhO>xk)8i$YDqLChvRZ9=e_WJ#_nV#M zPBHPUZErqwwPYxsDd9k^NbP4G*7k{@Ni&#PXE?60eKHaqQ@knssUWC8RFG9uKKdUI z!)Q*3P)~pl-~;514sr0!@tvKu0{UJ+NC+)*jwEsjIvj7vEFhphJ3Fffy6_VwO-ELx zdi+BGKywC8h%%mIQA>9%r7s@~U=h@A2rb6&lAKjS8#)E)u9)1csXD*i{u%B3eu3mr zm)Ua(q8(YLc6rQf0+Wt3lX*vWVp_Hj&lP9Hye-o5?DkcF*PszJ+riY^f^*kwr{-?m zz4R#1H-_QQb@=`TT1MtU!SY}z(&D=O>Iw$V-G+0(^PNH1&LBARo@(H!ih?hg8k9rR zuS5G{LHDUlcdssfkYjOUZEK4kKc<#RLmi?SVV`L?TN&7ehc(vFClon)G}J)IxP-d_ zvoj?+b~C;ZC7Ws~^fbhkXl$aOQnO^Une zTyHH|H2R1C&CiJg{*VaI0|;#caIm`1Ca{?r>gu3E3RaF~Kvy^2Gcn1V3}9uVq{r;?H*6<-B4j7&ussG* zKlgHAn#m&4y<6PY+(|7Ex4K*)pY{VUGZ6Y(xuf+!D;5M>)5 zJ|ZHJh-Hg#X#h%jYp46>y5+v5#x@3_j*1i-_202(V9WgTpN8o^}9@0*DQQN6_}KD+0K6x%O$>_=4e-B)!Y@X~Ft-DjuF^&z=z) z$T&JWD$&yb+6YCl4qwd?JI}lGx{BAuuRzxUdR!5KQung|+}ftJ0vl6DXe?4emXjmyY z6m~dJ%j$t#gqXInvNDCeY}p(zDR-|Uq~ig^2SB^U8pw^aKrfO2g@9lixQLXk$_;|2 z8w^C^-X6ta<(@YYK$y7cT^q7g4N{-wK>?n#c0g}hJmYVKzvacz?d{!cvkUS;ZTY~j zqbDtg`SRsjFz^U&v}dw2qhXioPb;POC*yj}*uQLK7#A1UF6(c?U}I4X#Ba%bnF~q9 zuVT(8?&OnR?#5TXXvt_Y5&30l9{`bns!V2?9JlRG2IqAYG*pGFGZ@r1ZYOq{))u`P z;=RJ`@FdeiT^!$c(w?t^?XNOO#*c)+sX+)w&!GK&d%PUt$OH@jDsGBl)`gGPR{SlT zF)t%8{{%UWdLwsrGJoLX0gIsqo$!JbXn2$ z*8fznV(du47M7{s6_`=kB4GR)PQC)m5)Pic|zgT=iAv@ljWxB$vxL| zWDQqFX2Y2YqK-vopywT%GOwDRdmr*H0|9&H@gAD+$@E`8S)HJBV~0a*kFPl~=C2Y$ zOI98@_yN&iq^g5+YdAvF^b4O{A7HYjX#i=~QhRrRCxV`U&)F5ADPliycjB3{il)WX zNa<{N_<``-=00NHd98Z#W}X(3CZ|q2EWS&Sx%`>>{(Oc-dxc?%^zP&X_%O zM_-rtyGMGd><4~iJwHKp`>{Gie#B4=S|6Mp4a9j9`^)A%r<~@EoO5eIige!0%q%IX zwz~S+ty{M)>P|J2C7F&+#0}^S=3Qqh6~|uddc4-xlObGTLEhkpABA?PEtyTfPq(vnndpn7gk8LJh^oW2pB_Vx+}Z zrVA4;+hT}>g7j@ zquLJ=nV_iEhHE!!q>o1VwS?eO^_BxcIe&kIHWl9Km_%Rf%~zEf%Q9${HH9vf7bbK!_#c5l=z< zjO!aNfj%GfF>D6xf6ed+%r?M^%ND(l9R8yRjtxIUDdD6s7DL0E<- zz;nwH!ogX5m+DxsH!Oo4x~+5McG*;WR#N}qAkl&?1n<)zBK#TO8GGYk|NUDc>`ht# zr;9AoyCL4Hsu#1(<2Je7mt_-PyqLbE*lDT&_QYX8bvwPM2FY$v!52Uv(qWHvI3 zifWsfn5^ZcU3r7boqkKUKI`ynZXtI=u+ixkN55spMje^%*|#%Lg$=i0`sRI_Xn)}c z;h1agg}QF0-Sfi%lN_lqUw4tNEtJVWtE}12=_r^mTc55yyIA@m#hA90(WB%lfsJ@^ zQfg72#A)%AGoE+FSKDg)pQ-Bz$~~EJ6yo5J{Ypw75XH*iyZk+zV0$;FS`;=`(~mZP_Yr&n!@2fQE$|?7N7s(@lbLO zGxKiltb1_kNZ$a1G z?)C*Du0yIL!GVM4v8uZrF+k9#5p}XrIw$08Rv&e476_|%ckBT7xKqRQ)vqV>#?Ioh zW#3aDomN*@XM@vI70yzu{7N5s@nHGT?5S&!z#gDfx;UQ=} zSQaDWKOSd_o&uBL05}=y5m7o=hcOfHq~9^xV&OJ-0Q~h_lr~12lLkP0Mp2=7`*F0l zn2?@8v(J{M5!h{f>f@#zZQ62DlgA^J_KDC93-=EjLiB$?PdU4$aG~jfKWSdqEGHk(|J!6ohnok`F^7H#rDSO)! 
z*ykO*i97!v=H5Ci$}Makl>w9#Q4whrkq$vXazMJfOQoa{1Z2oDz#tR}>F$(nP((VU z8+lt1R#ez>FVd+{eLB<0S1!2NV{VkFL|)IzqI@z0qdQ5zLSr@S z?cBtnn#e(7Tg|GV1<{+3>5hU&W*Hv&$(ALFmW`s;M;|Zbc%C}5wQz4N&b&{PgQ7&M zsg`v0bxwokYyoer66J`Y6wxZPG-BoIp4&o!yK;a{@qW?0DQev!p}~4YMq%;Qg=p7O zIXB6i=q5ogX)yq67BUOM?<<7mclA@+^mUd6)6UrP^tdC)sO*Kg zQ{0J~phgp{;c*Y1Qd+Joyfe*FEVM+5?DcSGhr5!N1GO#*x=B;B~BWjX42{cmxnpanfge%})gI zU8-=)3&^H?IKNkeKTR0Jlpl||?T9X$-&yKl{XCIwyaMdC3En0=7?t-!j)Utr%9(RTBg76Ic_A6ZyZtY8Ek9tw%=Tijbdl;$O-qfj4dWu zcn&wr5$)7=-`zS#)(ssw>$u>|;8|VjX((Qw5MMWeop$!Md=0-$Fpu1{4BZ*$Q0~Ab zh8<|7-r;7(5yuo&_%oq41?Au@4)QCFG<`j5Rol`l9s5$=y6bK)gixb;U&vlu=SL>H zUC6#;T~aKRDhoThF~v&6lD-Yc@|+ApS=$D=T~cXq!Bsm*oVW`sm~OPT2TA* z)N1;!DpI;ryrbW%T-GA(&Ug~(h}{pagaC3&{7gjT}}IWj)6qak@pqpJgVN~eG6g{nAi4<7YoZCXQR}i(FgUl&U784)CRO->n zY7-7U;}O27P_@KPGviM4h;#qBfpJ$}NzIMAw|7^VT+5ORTXS!j7UpGk>@3?1YJIhqy4ChjDiL>|N9V)+gXaA0i_11ElG2XG@dX83>D|%MsnDz7 zwsg}&7IC4gV-!z504Ca!2?lzq!$JxT2TD^@3yU*b_kdi%KYSZs(Vn>A3RMc{6AllW zTq{TfWX+Zs!U@7H8xh4pYmYphWe=AP44jU!EVXn`p)!rMU=z1A*QVERb?Dgta0~2f zmFbqKd;{IWj0F*63}dSMD)S=Nx}WK@$T2m;V?|`xV?~-o+^5&=w$CR@(kp(Dm^*OR zs#_bk&}#l@meRiwM#jeeEMvzqDo?=Tc^*y-+;yTSD>?V@H3{Q+elth=^2OzZ{(?Bg zLIO|HQU}hvrhLZU&c0uh3J3Phix+Gsgx&L_-Mdd!V#N^eCKe~pnOkS{s>p^@9`r!9 zkYTdb4*R0zCPIu`^_Hn5i@~Wo zLd7k5K7$-SP&CIC_Zrht#3z;=Z9i|C!im6bWlP^Bx7UaZ6D z+my$}@-n76|9|?LLAdwhSAC)EZ?vI|2<(HR7ZuUd)-)vhdLuo<(7bXDfFY`NdsvQ6 zqY5KGUm8;DEi;HeR71Uc&`%!Qo-A#wV}0fTfpD?jQ=Kb_Pp~8F<&)pvZYok{Tq*)5nbP?;x1^juJ`DQJ$>Om7@c%ea6F z$L#Y^T>O4SeJ~1sS2cHZkV56rW1{%>Gp`$>bi2D}MwTZuWT5G&y66k~4_rxCLU{Da zpX3hdW~kl42EAGdto1iOxA&2^E4SP zseFM85fEjQc&PwU$v0RXqNHLMRozV(aZl;ZvA`rSA4U<8JV| zNcTK>(a560?9Y_ENC7X2T-5wX!p`)FUe&}|)B;wT(HMDapV{>&{jlD0A&Z^SUh^da z=Z^~saLu>-vwYZ;&dQ(kYoO!Nczc=fHkAzeRvG^XW$i87lwPdjTTxZ(=tjRNOORFV zBE&j;EYoxx1UtymB5E%iW=326jA`pCA@Zd)behE^j=esX!4bo^#2YGeQp^Ah!w7CP z)-zK&pzwOtuxCVHqHxE|EOobo=N;ja2mcDx;WV-J(&KO%0@m_^Oc73&c~6*C@HJf| zxm9NAFpLv%2^JjLU4(VdlN-5=O>(H|TR5f^=wFl?5el=EM-Ee^=Y+T08bG+!OqP9h zox-N2wP*zecfEJAva(imy%Qckeq$T3BhSIke%8v=$Y|;?U5w$xhltXCL?bE)6n-MR zK4pq7)ZEUWcB zoffdaRUg;tEiy(gC};KQ?4k~`z4-dpQUnu|Rz54IBIVf4DKqUocwNtmS8ZYTuzk+Yv2jNPkKUwH>@>V(dnb#WKiRvaZU2ML zgAQK`L00WR%JGXK`h_Wpnr6kbi8+#>AC+)%L^2Rs!E`x|p<*VvrJcL6sm;l6H`luT z1I#)#Np>!dOQpi)dz|7eM+a^G)%4ie^Lr1SBi07pILnMnMmOr;d$m5M=1+|BrxDW78po_UhjyEF;&n^@CB`~(48sv+KxEAw|W%!g3Eh|-CDL?$#FQO=3ZC{>Yhn? 
zHonPZo`ZI=M^%-?^&oqKT|@7^`()h7G9@|~MIypnru)-1+rcqMx&?)b^$1iI$4v5z zYI^>RLd2RGn-gs8%SOjHssi^;46^&!mpk9--HGeDO`69V$`cj&*cDC^0NiRKRN|MU z!s1VJmg;!fqH-p8zy|6)R1~s7ZLQlk0A&s-RK9OOzV6FHj@;bacXV&=TQyYAp|#?R za5PnB%DP1hEnj+Dw;1U;@#V&Nb*W|QtYq5RkmWX0*}`r9QEfwx&Lj7%r#;z2N)8{L z;XsY?OI;r*10O6JfwK05`}p&xc&G)D=kerJB?+D^gKLHpe+Ji*Wt=%qTrIwz){$QoM@4N!P#jrj0^VsOh<_MCosWjsW)8gW+YE&|AHufb0 zebOpv<4K;5oywN8P0;Z{R_$2+!LvqI-t17}N6qyduXb9eIQGv-M9;jNG8Z<&-QFG0 zH*Wcow2^GwnVkno!*KfFkwWL|()j_K1rEOn*Zgg+6ASyfkLda^6><90t0$A5 zv@l3TX^d>eKn!>FzF30L)}oZPGW6&G<)Vw6AKTpFNX}1gsTpOJv|P=}vj_4~h&-Nt z|KgQztojy&xrJdJs5mIw_o%VdodNz;XX>be_N>k5O`T5r30<~{UlHtlw`u&vQp zIToLtS#vj=?XORqVAb61ZWd8(1=|9nYDJx~d-FozzzV3^|G-=RJxG4pCmh`4Q34Oe zgIwpBI6t0B$M!H`$0^UamVFQu9<+H1JXwWhLD88Bji)%P8AlsiW*YJ--5X%Zd$N9F~jV zh54GMnm4MnS~5dcNH5=TOZD7LH?b(HJx4TdnN!82OV-?d2U{!Jy@Wq1)BVF%n|!!| zwQe(2zgpxBNdR~2L*+6=(X}CCO%5&APL72`AHD6v#m}^Nw4{PB>d2c~?RVovQ>AYa zaS7OW zc}6!~%Fn_{~CxJURXTGqm}ug|ZhG&9w*KW;n;qpp8- zX29T7w{9tQ>8t>ugzeauTiiF+4+E>T-v)+cQCstA7iHm+o7${Y=iKO)DzflboU(oO zt)8N^Z#kZX!j384YH_a56aL<$u_a#YiD_#Xn^!2HMUJYQ(7uZ1kckVZkyTa>zo2yM z3S%6>2?uuc{GANt(&UUE+YkKmg)Ut$vNpu4_o_)h>l;s%Cr2g@T~zJ@MMKgU!guqQ zAecsLrk%bK{jggW5P%W?Pr2piH{coKop053XSqL119K~B_%sOj69}p+?UuKq~ zRX{JHH&?`cu|l^&TGS8_B2$FKF_M8qaw5F8t~W)Vlpn}nVHUdmuGfIfbr$fb3}#Zf zPK?aC=hEL)t?v7jHXgSc3wFZAXLkdi9%|2H7<%aLF2;K-Z9`GyT|NI?raDYHL z%XBuB@G+Zy2`hvLR@ek!Ypr0dk{3L%y2!#r$JT6@x!1a{W68IoK=9Y6-i)s1{XhPC z92~^zn7$Vvo;n?BU~J5Ru2!aiuxH*t$Q-rq2MqzWu=rd0ZMSTL}exBCV=Q(UTPp(v!;#xldCI0>C$ASjq#-X*w?@<_qpm*%d;g-v**or?jy5SBusBfgTuTdyed?K1}6fO0p z;#FlPv_GHpSK-B(g`Qp#WY=?nWmwD`cLz~~S@WrSf6+3IyA0Ro2*S0!z%r@LASlml z^YVoWOycQN3+UDCi^F0Ve6gNA5+azO--rXZY>#YRt(I==(n(zUJq^$s)`xX)?;+yX zGW<%?Reku-vQuTDWW??**gsi%Gs>Fu0qvN9`%3W@6)Eg;uh_UqkQbEpM|bfjgli(a$X{RPE7JcYSXW5% z;2!gRG1^G5nO{P0=GHf{s@BP#OVy6$au2QGo#@Q?RrI{4E*f4H4b-AQmUrLz4xn$~ z^4xzI0rjpyED!ap8D!UhLR>y(43K6gTD)y#l_>;Zi-~#fBbVtXvT5bTIu&%^dj(aO zMy?{$!~n3&?g!k>=9uYWXX{7h0l zpj82dS3tvbG*n5n%lJ~*XX;2)<_Q)K&qGd}?vU%<7fDEnnI3<8%OC>y>NPGlyks0C zVO1u^SN;Hbeo?Xx;$qpIbq4qN%(M!wF~2v{Yy@1UGJpr=At0y#LT#g&t<7&4Qrve| z3xL!qWV81BXNqpXcL~`UvQ5G(F&ND*pj1?N8&br|AND#me5L>FqBRFRzv~WN7wOfn zcR!rhXDN_i?yNSxx{TqiW?Z(@{;JNRE@W~n3Q~h0TQ{$)_GE{6k6)U5n93yhiwnTx zpN2E2Bf7Or0V*$O-eF@FAbvuRHl4O8K4ky{3A3-|T3C+hprsGV$rBbi^zHsc`pu6X~H6TYa(0>Jr8F zT8q;My|S7?pS~nXlfn)GuymmU)Dt?fXP3GXo&epf_jR5&ziUE(w1X6M80>x2j6eIY zZ}0=f*7RXeKo14tl`W&?PWZzs-)#ZcS;+UP#yP=GFdG}T+O$maimZG*_9-6{T%Umrmvr0{Vvg!oqzuPc^Afeps7Uij$Tku zA1w=I2^4Qw4J(T3gc_|UNq$gbLw{urd)JMUAcH>B`1Dh>zbW|~=;I%I1D>z|z?oU6 zc}cg>JfO&a!f3Vj$Sb^h(SX|EDHZdhG#HX3q!;C-s;UYb_*(C0*7W)cTDc35CqEDy z+Tta-h}s%3PJwMLvTx>}JfQ!3z{N3mmgfLVo*5c-QSopAqVKTS%j5?1)20FY=34;t zX-W|H!)tS8r$ibA^J|egK&!<8w6ZsVv{J?LOd6ib9x1C1Z78#5 zi#GL1l=HeKoYrJ8z{WU-a_d9`cU=@DfW2HiZuRjtX0@AM2WSn&pv`~q<=R+__a$Wn zvY#E$L!*@~0nl7D!4}9_go2U{8U?%M5cSO5#bge+st5xQ=|IZna3mo1c?-bNtzca; zSz21|Z426~2w!dtpkM&Hj-{LCwZ`M_jJj&w1=BjWBPkVGN1N7e+>e*W{&@Qd{netP zr(}7NnsnC$n0u4vk{qzkF&g;Hd4CmIW)s1PJ!-IUOAW?B#mGKCI9??fbO0N2hnJm`*1Ext5uY#fxDs3>Bs!7W?GSU_5#9WK5ugzXvFiPXhwjhvOR4 zGXPYtvtxj!zkTJAZAhddR%5nAG*Z0ZJ3@r@@%Cf-5~bwc?DJo@`qS@}?h~9tYzugb z#;RZoYrG7^do#b0ATUp5hgB!f{2j%?kzfo(tYeH z!mom?CJPtY8Q`c!LEx8mxpje@Ysy?V8b4gV;J^0u>(}lAGkME_+?bnIy)t%H-=)wC z2~h@IDEUJ3_G@%>NnIYfE&yU|1U$KvB^a-mwD!#Y{&w+$M$@=2J<2KT`RArIrLKOY zTB|W~U9>QUx3#f0Hwl?l6%W8Pe5VuO~Wfvc?AXkRudp~1q1YHfvpF*F6p!rXmqR_z|tlIeX!huy&0Fj zR%L7vjZ7_ixx|)dduD=`A#3>8Vr4=x{mwf}R+FHdYL0MW$V1_896TCgM4`dz+>Y4Q zeGaAV{_&-yup|D7BcY>%nbiUdLAh^x_R1NF0~H!yh`!>S^#6b4?*Q3mQQ%=6fK<*kdT^?D*q_W3l3mw8v7mV)GC 
zFZMNdmq+5F*fC23xyi~Yu=vcq&q5K=PpB6U^GMXim^E3m+^b+4RUrYQ4i1*89L}9t zNG0sCmdi_vZdD!g@Rfj1n-df*(e;KH_RyoToN(wg-TEAn_8Ze*rf`_3l%Y$F7v};zZ*v0xPm8rPz ze#FWE0;e@XaDcA~o2rOd4C=6Y1X>d5+2S8xS@hCivJ0Q{I#LmnotzfCd>o^H%aZm#lsMQm^$LUBl04;FHEsL_)&QudVkipq~?1KlB zpb%Eld^uu^IvYEC`E2h6=c&ClT+J5Nvl#Y5N3W7%U6`NL#l_@aa{ecNyeg)pg41Ek0$lC~_3jNoY3aW}MxpA3+KGc2jN*YFaS-bv15Ro0f|vOHR@ ze&#Y-%##1%yh?)Gft!V#RjQql#xyb93&7&tFC6t!1Re2Iq4QVB`QKp4-vE4m~fg7TQw=RC*SyBNJS}3mZ^; zdFQ#ZcV&^+7iymA1i1|A`7MdAdJa3{THB{&h+C1bXW#Q~pmr{^VUs@bVTe0wP!S#> z5u5CE3HQf4t|9*P3q+0JfWfc@5dE714S4~dcdoUYuzU9s(fEBdW$0Wx#`|b11f!}f zA&2zsx&f>fEWtyKh-G*kjTn(}<#2DKZXJrt*DX$9jV0F%Y~u8?{ITcOr#B}q0OT1L zE^2R|B+=`5KmU0(S>hAfs!T|>50W}Efa@Pmcojfw!isKoZsjr}rk8;{mIuUKbeKS8 zy4B$xZCy@m>~KR4jx`GTAs1B1e1e~Wqq`fiA#DDTF zj6nPAoTi=94+caZPb|Iu@@|pPl+?ey(XUUVrEUA(sQ5cM`O^}9|Mtf(d}u&2XYyy8S`&=5VvaDVney_RM|0>isAPTDpJK$&7jNuLr%EFzk zr0qYw;T+@o{1exf3Lci^y=OUQ{V2&f9df_FY?W9Byl703>%06SL$zlm zlOaxsi9z|gtd35ZgjA?2@)puO=DH~UfmhiCMyH>Q1A zFa^c(8f9;?n~&aU^_PQr)(Q0{5W z+Y?E|e-5kMP7d4b~+)sEZ}?@G$;hx9{t3HJ8d2aKC&YS~^V`4P#Eh6Si)$n|+( zXM7d2=zJG!jf|`12zhd5TfWcgMtMo$V^LXP2rRhH9VbwA|FD%)5Tgt4zV}&Zb#bJc z+wR@pV72rRb6P^>jJUk6;TvQ2CWqI)&+`vnyiuI(*?})ldT<@Nv)UgeVBD|ib+pBx zD%N0cp%^N-=GMf|(ux!E95h0ozdM)w?~enrl-MN=wqXGj{Y{^@vgieoAvbM$HOToa zW0!~uI~r~FERwQyYIGL+tZ7#S46d_;bG%ub>j=qiQ?ML3R9hI%Npo^h-P6w1xf;cn z8(Kj~E{8m8cx@Z?xqRbdNrBgjoUTPxiH#nIroR!DVx$oxX|Wx{n>VU^XVty2V!7QT z0e>f0!`s2#A3kt-wxkt!j+ket6<3TR@;j~r?xv@@^T za;&De1LS;kR$W0mRvVI)j!wM7a6q~xj4ch5qLww9qcM;&OiEEthJxQ6LofRRx4Q|)_on^0967I@+WZ46hMg6%7fq9x5X+1VuGKzOEyZ!KBku5{ zl803YIaLmA%l?75CBv`8#2YP9G_v6?bMioQ=Z3{3sRaS*yO2vt%l5a6wCmc@ks&9D zVt&u5iYYufS;B4hJ<@;td=H+`c7@S_DWY{vZV!*dY31nN237>(yLx!p4`~2mM}7ch|e5o!Wa$?OeT}R#+-h1Qgfsy`z_^bUCu_ zj*XKaMHr0MIOTf2z-=@e^u*laEykz)3xDB<>y=S+L@Cc>ZUsvDgO6r^)$>nW}I0zNm zny+^JxJx;!cyMO)3Y~P&+x+$Qs>-KGUK(5q51q+@xH)-Ljc|o8sd2Zi9$CG6tfCsm zr}Bx8E9Yw;U9weTj`ushszRs6e|nExX|6EI3qbl9lUL?)av=$W0z}BYp9f z&=ii)pf&07b>?#YiigV!>zGlr);eIb*5(+962C3pFwf2i)S3$ls@Int_NJ4L^p@Vw z8Pk48?ktKnBG;p?KHh^VrC}W82HSh<%@*vS z_a9pl!;X2XbO@_h+R|L}hGyM^dEwIe__&R-RCoIf)vQ39u>x7IqZ#R;q6aaW9G8|k z90qJX{q>>xe95}6v>Z{pszZg=en#FF%lL__)nZTlW~}C?j{?F~l||A^C8a{cI}9sc z?h~-@PiYTTTURO^cqFDz-Wxx4W5_nBG=qVZqZ?j}&;S_+kC=1{ zh|7gr5$E!SkRR5|Xs+l8@rZ^I9!i+xk%XkVJnVL*Vrkun(x+_a)2c4!`!VN_wMMF_FeuK{Ut>nZ3!Li2j-? zeAOA2rZaJEuS>!o27^SST*JJaz$=E<3A4*KT)s-03tt=K!rlD5;r$?GgqP$e9>vV+cFV*PnhLFx|s3 zM-Klw3^TTpdr44^G~e4xbK|n_AQ1LQ-yePV0@)9;aqJMm(mAZd#eQ3d{=Qfj$CF|;GM(#=t^9|cE?BUuID4pJDc`KdU zPId)(H7JSh0Jm)o^26!N0JYV6<;s1LtyE^-;~ zDDcE)*3P)DZhmNkeV`$~kD!R3J5+By^KGZuxRyiXl|Y8iqmSQBV0%M1rn2MXhT}!V zJB6THYwFONIKfuE*_vWTo%}a-fwqB0Gnp+s)_uZRky+)O-aNCN8W|(b)myc6R=S=> zxvS&yY%Pb?m1YZx6NrLMqS(d`a#nXefEP}C#)MW*{u!h4LW=J(FIAW)#}z5YWH3T- zc58h06mG=bs4+INdJf2@T$Z0-=o7!4LSbK{c)O1?#!YPuMwr~*K(*B$Gcskc-Qn&$ zDvf=5z2viPcVniJk1&k?@xzX^c`r}n^7w^k9Z}gc)s?Ptjs;Uk__<}7mFah-H)|qe zSFuxI`VsZadf3rM^RAnsRM*-n0dR`XruP$ibc>p**Oah_k zRtOaXh}F1CXq$z?gHU;XoZTlrc+(L~+oY!4jj#_xgRF5Q`O#MzA+-`lO3khS7nJwG*l)y0i1FNt14< zM(4=&t*e`3Ba2?jatd8;XQ`UXjmzwLaG^HWRVtN)vpyqt_zw;9dCST<-SXu|!#<%e$TxS-z=}+L8y8Du-$2smd?%+@Du^uSKmKo~LwkI)Pzk%k|KLU`j_i zP8%CF?zu@{ULbxU;%JN@i=A~h52QCfR7bUS`dXD|A@yvv>*tKRGhShT^0ZdX2agw! 
zT6@5~GF07a@DOR(-d^FjcQ5AX`S-OJo^V4(LqVQGqp=EuYtliK{Yd4@)%c{SUBj9J z@Wpg@pZHJv3&Lr{MN?n)=GYV7>FBZ6)zy{a zc$EUn(c2=g*inU(8QNrDn!>qA4Rz6Q0YJAP~Ps5r@kZPr2385K0oZ0 z^@pAr!2$KH{YBscD?}8loQ6(bj@2NP+!=_23R;*9eTD#SYz&b(b5_IX&7n-^in+95 z<1{1B7{x6k0T;yc%`4B?ohB}|)GN`D%y)fvwC?DknQaeai+Z`O5a$eg*SSH$+)}o4 zz1$;1v_h_yJ$h}!sbO z!un}nYAVbKcnRI4T6`|C4htz+%1-COw)KsL}r(m-b!E-z2bJO@F3~cux*QiAu5`O zHF^`&b1-$+`QgnM8e0(=ZyozsnlC$cjix>(Z0E2tS+O6kN-0Ktf6A9L!_DR}5estC zHH<2GV%Z292S3eXm3#X7exi?ONe_#jJ4 zL>J2Z4CE@(%j-`QxoK$A@z6`!ExTo)ZXQtxD|hF{jLg)3H!=Gp{siI0Gcbr5B&0sg zJmD~vbN)PeW>D1n#6Um-dt>vVr@PsdFAvvzzY*8UtD|z(LVye0m|TAYI|3!YWc#3w zg56HzYxMlO_UiR!?7LopLJ!w!XP#O3RkAkromCD$>KMG?s9U*d)-p%4lpw4rtkIc! z6zKUOQ!VMUVvgCRWwzHbF*#5U4u6BY;$c0&q zC&~K@Uu_y|4_;Pr&EisdXwRf;kIpJ`9mZ&?6p?5@C!H3xn_J z)RgbFYkvLV!H9-t_L(ha8&8W(wUS!}ifqjU1O>P$Mii`D#WDa|nz-P|>N*6nB-wY#GL06l%6Z8q)hDeVvGNbLpzBNL4G^z?z_yD9?qfRCqO${2+7=OD6;qBdF-StKf~PT z*r&c?J+a^zA?*DEvFa|L>uE)Q^QMA5i+ZX*7B=?Gh`n8z>%7UvXzQBH7I|@-Vg>1) zxz>u7Y`0g*OFY6#;TP_$@R9beKR0gMm5qh1@?_|>-!P6J2K8Bu_34*c75c)M?Nf3T zjiJmlyFF2O$z^h}tlo8X?$gXBMo{Ooq2~i>dc>fbae}LED(IM0Q(Ak=q|y^>)qVW| z+2N=As$!Ph&z?SgjyySh2-yc7|Y@fMCv4I;8U*SGb_Y6ST+6bS z{fVK@{(6%&?F}DZ24F?<*2>fz+|3x}-@Rh_5Tr!*?(<&RM&X+dK3Cm1u(y^Vw5}Z3 z-FV7>CUxIcX6uo#dPTxfj#gN4R``-xY~v+de4}$XTotw!?q7wXAwRuHo9nNmW0S2Z ziO*XWMyeZPbr{}RJEU`zq-o4IagH=#L+lH>s>_Mec3~-9|J0gG2%R_Qh~A5QSN#IB z9lpIETetISdr^)j+qiFs*|kDyktlnl)HV!gyvdL4&ykS7QxhK0;A9aJx~*}lBPQ8* z&wrv;I`U>^6Ch~uJ*DYQt-iSv%0|k>tfHo)COlh0CM*fMi&E55GS#w_gu}gYltkRr zx_f#gwu)F-ITt)cltc1-uWLGs+-=)Qg}v`h*PQcTd6@f_FTB(Sny&1sh>UE2=?RV^ zEqd-aPn6hwyPR+vd)`-=SFYEy)NJ!KRoAWB2i&Ug2d=^ULM!9N;@@^w-ZZ6eoHxO0 z(G1dcRg0e}UA-K`6qL5=xmk6%PsTaoTC%pVu&_T7t)?cI+0c8g4`0Y$&B-j53s#qD zQC=x~Fp{h`ZnBIJaw*Me2<_~NgMZ6RqiVUEt}JLkzxadqUKAGxMaHMD=09l+(rwQs zjd2Z5Dd{Fc7KsoX!cJ*IH}*~Vt1JjXqN!#c?L1+YxQO^HeZ;@ihY|I zj?cu2)hj%=D!j;=D57j7b9;-~>RSz4=H)$4Nm0+Cq&LL09ITR3QY0mnb)IgRUgpX1Q2MM7{{3$0CNz+HE;|&K(qO*0YrAf?DGERDn zW`k%&mrc{k5C@&iSBdw5Nxc*Is1e!-gly&$|Fw4D-!3xhmqfEp)a4@@Eah~;k>E3i zJ^$TehehV#7|9^S9Pf*YMvR;6Q~J5)g-!XDzz`l615ZJ=FRE4=hg0Ff!l*dK;B!vm|C0r#E4Bs zzGrbsq7OCF*{{&^Oi{{zbHI9OTvx@>jx4&>c_+*=HtAMtDl1lp=3hTU7__p?KPt4@ zG{By@O97#$zcU^_>#3)YRWhtph0Ttw;(0w z9YEmBFBUP-sTIya%cB3jif%nTI+*(PO4^qiwjcM4E+)ih%X!t>)W+O#4=@1UBVM1b zw%6)H8AQx+m2L+5aNxY#NpXwYza6t|a0cS-QUmLyIu3KQAmVF_w%AA8jR#CtA{-#9 z!^-tpJh0wIsEgHheh$JhAked;fUoy_Ali)DD#^G-LWqO0S#xu(=bOz##6rBQ9A35I ztDew;m(q1_cWQLSH^K649qkZ=)X@I*f{Xah1x{3;XtKAc>iWYXgcwyAD&F_<8Uagh zB39Wvs21Ajh`tLA*sF5MbtPB}fBT9a>l_Xi#@3Lw@hKHq!Zq10j&r#c-bJc}2_%=! zvod8YI8G1B9^Jo~vyKn{bRlEEMk63Ymp*ZD#Plpp_@k?mz2%@#`O*jPQMyHe+^px~ zi;C~49RMM|*=m7&BTwt!DFa{oj@K&2ZZuVoVq->J^bBBs<+vWgxMYYEFU z>hAl70*B;X44$I7njCLg^I=@iRM|D*&(U7gylK1NNN+{HyWZMQ6E->7ttOcI*o)(^ zP~|FQ6+gYMG4`Mlfngh^wXL&mvX)?~OX{l7a(Ty#ZPjy!;~c**bG{i;ENB90j``>? z>UQBAjzeIF>GmL~Hb6PEi-;4Y7OOGpVFyt#v0|E85-}iZAWf5tgywy zV3fB)(kWEy=NATqN)n|hOvjPTyQH?GJBx=5R{*7Q2L@3eg)(+^^_#ZNQ=Q)S_niVi zVpScWII`IfOM>d+Y7JF~kJ%w6H8Ds~>eWZ%hYlsnLD1|@! 
zSmGv5*mlOA(1o*h#Y&u}DjZ~1Ac7v$rKM++|K`H5I2mUp{YdBecV;@`uYsCtd@!kZugFa0Urx(9 zu#v8#fVBHl=f=M4>5i$`M7c`85Q@FXm@!w&Ffz?zc42ojltoPVc(J2yi(;Re*mDSL zi$+@HZod<&{<)fiMrciU$Qcz9(L{9>{!8Q}p~g*qYO@K+)vk7f}0-6~LakI@trb z#Xa^nRQNgY*WK2+ZlyUf0TCB^!k-%+jtzVoagFdCv6|yV9pN&I=+c2=9RXx@%v)!B zRgl%)-!-&%+cRzMG@_KZhZ@U6R}_0kL#=vGBU2Z2Ue!CtB%3SHR|lYJF6$z&cInn4 zo(`_9foGx)@Oo+zan{`!UtV$(MnCgyMaU0bAD;khuTf6f-(Eso0LL}WUo}n;%)LA1 zJ-r1g@govGoj!`}7yA$|PsN?d<8(UDhb343#*g(lD*5x(hT z5+OogH}@fpU0PJzH$en*f1~F_#Pg>)@kUPshjy^bpX=(sJ_f4@6ghorP&cW6pPGLf zxH@AracC0*zl9=xoRR<4B9f&jX`0wFU_5b^ z*EIg2are)GfG471brodNfB)ux-F?yty;xR*Rj&N`>wkaOOSHbuygB8utNYuaPd>uu zFTI{}`Pqpr*uCb zL3<91T-!b?ij4R?8BTjjz`s5m{q}=Kw2UJ%CHB~ae|kgFx>ksonApBQ%B{V1J-WjJ zAfR{KQsS?Q&;s*5!w%8xf)H#q2mEmye%UP8lmG9cLI54R@c)PRT7O!%nnWR4{fpe| zAFpzU2!6+1bm`)8KN$Vud!*l-MOy@az?cqqd{PV?6 zm@C>qJXFC(%V9*acmjrWJkQ5hqQn52L_+Mp{70iZUM;(5{!!y9=HCCJP6nop+>ZXjhc@lc z&heLOt^m{K%f&1{2O|4b$p11RC{ZvVGzJnU39gQZ;lvdj`bWIe$!+}q#e0F^+l6L~ zaZ&O=?7$rdYa}VM_b2B(Rux-5lUD>9G?e04d<}qxM$+c~)p+q^7J((g{vV!cuxnqv z{A;TJyiUNGB#AM*>-0bqpvFx>C?hM|9-?8Qo09axlHIY83|OimEo<-hvR624YNVYpkU-Xc70a{0b4VW13-z<^02A%)q7~O=m%)) z$XLE$r(x#Fk~)XgdmDAaqW=D>`B~szQ!N=j6T@XwQk^2wcSH@_xISb`t2}?|7yhNy zO8=1#*_ITT5DGj~a{<_@xoK=LfeAkyUF-apsJm6&W^rFCwW~Gq>IR43s1?dRCDfM* z{qxBQ9t<(0jmPu`nH}aqQeAe17HMDE{Pzmr=10Vw(4IwM1INd^%(U-puuffgtLA*p z!Ep4(i;-*elcNiUNheEPrr*mSySuZ$QQl;v$5jes}p3I9v1|djts|{HN-se`a;qo zM^6V-zrcrt_zYpL*Ij8ei2I_N(A|b}_{^^x|DE8qb*!@tD}4dEl)BvO~SIf(0yOL#7QkJ=%DPx7B@R5`?;J`vgF4w~fp#==*OF z=coDLomw68Ht0N)K6_{~uk3ClZS{9Q?~$4hbOQ!D`c`N`Z_-)q7XmPOpUx*B819U~ zf?Qt@=bP7NN&OgMGYHyEG1x(`h;$&m{I?euaS55&*vPd^$7@g3GTayW+de(XT1Vp+ z!=rOn0uX${AN5%lvS5f1<(dwz+QQYwMefSRbe5;lh}8yH;6;;SxZ|ep9WiVApX0ga zpa%=f4iIE#veezS!YckJp82-{;#`LN;Gl>R`0tPrzE@S2AWM5ZCsi~q7-%5XbXNbaZaWBFp(v!FZ4znca18y)qlE%=jI_^WV`Ah z5jcJ;Q~PZ70p#FP+5yhRTA#%&QtC^Iaon>)i4ESDjui` znz+ZM6+2o!2u>M&JnTSHQo#2dZycW;mCW`3o>d{n$#8gW60T)rHfjOd=H3;Q2)JZK zu{H9bp>1=DK3rWk|M{P3&aYS2#Zk|43(FI2X251%qfcgKob;y7F#_yXUDz$r z)JnH_*jT!l?jL7i{_!{;A{;EFDAmhiJZWX3a?%GdoSJ)V(I3&zV1RRd2wYRTc45~^ z?e3R2EP>D4U)o>byg;Oxf4TI>aYF$LGKjaAd&>RGKg`qU67A%inS;_FqtrTnyxlRC zxD3lc{=>)0L?2uOMJPe`7^mzaCom4DMl-4ZAvT=E#;CGSY9~LfftAuke6#rNbpo^S=@<2)Q&2p&MZYcmoF z{^v3J<4^PbMDv9+W!W%wCwnv0xh%GCHa9iBwj0=b1G*lX0l<7Z{MqKr;G6pHbXk1M zo{}dwp9>%PD}er=5a-XU ziu7jqY`eZ}{bxlyA-h&6g;~Q!gIb;urscp_E?rAXqt1|ID!%*~ zGXPv|<0PF$emt>f3M9m*$C6{@jnkQu9pm>A8{CVnr?6Q-ey`Mck-1 zXgmM%ZCxgt(M!L>K6AIjiU}ItZf5%#j?4kZ7mEsxB_AIH3A3yo2Xr^bBgNS0l-`+# zK{`3>m^*hfs^0}!b8rAo+F@x6=;9+u#33$IV(bSl6PNg}xJ<3&P{yFr7e1k)__T3H z!gDKBnz&DlA09@*mpDHhE8BJ|xwC95$9J+yM|J`G3%;oktXGcp_!GXl`QhwC$|%hw z&+QuNrdl)<2vW7KCXJD{H32;sf9z8cKzXuh%CIk=(sgp^Ez2LLYsc4jLwdv-Z#r@! 
zBD!SY^s$&|^%;&{NeS8yNnmZihGZ~En_!~}IVd)YIsjm7T2!l-G3< zzD$!deB0rw+h2Zf57ac+Jd*Mh!|Xq%+0j@+btRc@-ewbA&+gnB=I8>h_(;6ko|bdM+JqG^PJevZkTpQjbMsk2l@8FDr3=yU;gYqf2Ps9Ex9Dt?Q5;fjcIY*Sc;4 z2tb!}b7RUKC&3?lXMF-IU zlq7#<2<#!#pI78G|JBFhf@ghaiE$16`|JR$wK``>QR8&C-wBRw9hh3*jMxu#Q_t32 z-YNkuyy+~}U`3RzlJ9bEC1;lYwP2Uk+|0v2Me$Cg@4(!9cialeea!EWZa|j!Z zjwJz3DiFxL{opL2*B?<9=|cDDy)U4@aV$q%0JTh#h)?f_|5T^GV?@Hoo^9^)_(Lq` zRVnuADMBSa_C$PVPEZG$Bp>4RmJEL2^Z>|Th37BGUqVgi&dtjjiv5pL0Z3?crj?_| zMd*eE_b)B#3qGC5%}Apo75d?!syL!5gZBtn@k_fj$P(v&1(!^@+F?c9a-PeKFoz%; zZ$Q4J%3T9pmQ!j!vo4hEwD28OFre2o?rD~*AuuO-vC`ciK2oqPj9pyvk`01FnnP~d zFJ0yS<)Hc!PA3W%D+tJRiGLOENMrcNYQOY_5;*&N!khPL#+g{mFbY6*)Go=nvPD&> zASg>UDJpLSXtg!S&o^z3c{loa&)9M>;w(ilmw)gkHcH-Me^|{56-OD*Z)ah;>-H&O zkel@AND_!f7B)iM@%?wEn~10XN_75ny6e8aT;%~DbOz;(`)Q_J?R$lQSgVNngR?-j znTtzBklVAc+Ix#?x|L?H*uYz{q^ z9$iYp?z@aTOiMOQDtB66tj)DZZ6D}^++3=h6ZNXx8|ZR;JU(=9*dcDig4{m|cG;>q zKY{@T&?fsLFa^|-Ffzo(9v)(P#AdL}o)U0Xjrnd>S{?O+l|S|U`jg`mORykdvoNmq zw1tK2xlK196;(=_+^`0qn&gd#Q1<)ya?=(mRS~;hWaU*1t^;pY&8CkIyqGd}`vlIg zpi=f>RP^+8r1n9lVPQ$kz=OwbRrj~Q>vKD;ACp&Aj3l4*mO?ye{B(zhUJ7UmmA?-Z zLWg@?vMMM&Qr`SjEyCZPNfd1qNd41$aZ_oyY9q@8jIJw*nNA_A87yI7MzEOnCKuyq zOuov_0yN193%dYcersj2RV%GtV=L-diY*Kv>_2T%MfW?xsF|!&G8H%(qfgJ#_Reb163S^u@IU&UvZ>N_NKU{mX!W1uVezAO|px z6nf_$9)FjMkKtB+K)Bye;z|oYWaP9k9w|$AyZaxVv1)HU$GO@qx!T($_Jwv`G&!gH zenoX<&!-18kr;r@!)cTfRkK@y>Iao`txiywP;U+2l?Pxy71on8@yG&7hM9rdkigGtWAx!_GYnZW>^WBra~c{oCtprA z-vN+E+c((mU8crAFhXwmSJMoY!4{}iQZJqsB=ss?G^`uNr0pdMlmLTELP?Z%}4hrf+#yed#Ue~4(;QJf%SKeJpI{VgQfxaMEvi&*e-*4~&0j7Hw_%_Q=MARP+*8e^>!~r4V zq>euVNEKl4qNz#7PP~cfuUX9rDy-x^#i6Nx7O>raLaUFtI{Zz$#WEs8pp|-;@bvf#}nkwdx52;7y));cVNERlxDG65h+5a?%8qA2| z;^KO12qzro*X`lBwk>X0pIbt?@8a*P0X@u4Uei&)()(rKtWgDw>rz`a3}wMgZ9wq; z2j?Fc?dR<4=Hp6rrN-8@Lvh3ZgJL{aAp)%Zi0EjBOvTukl$4YQNCid3$o3#MW_^yd zZm-o<&ABA}N>_>ToEW@*&$PGbiJ>bw`-T?=vZHYduPKc)fV|Sj3c2t~sw4?B0Ls~k zRUGi>f88FDv(rviCB`c`JVc-iE$uBuoepg~nIAGDabK2uPOXJ`u2rc~dWmOeTUsh~hYfQamI^9HFC}!-fnNsAC zgv_w4Lz<2bp^~}8ul*ix4bZH%bSXN05_I`0fE0y0u1ldmEln|0^pJo=V0o8KGqRCE z7RjyT7&Bxq1EQv_=_n^7$o~AW-CUVP-^*ikl|Pb(zOXaaqqT-1Oo`nC3d)MHPv}9> zRaJ&0+B^(=$|^|*+=}SdC?d=iXX5}agRHocAn0=!`7_Ot(_?VN;-bsipe~C{thn)j zmYqCL=C#4!dcFZna&@r}3}ahi;rHWT5q|3d1blxicmtGZX|Ny?=w*RCq?ndaz;us`+u|^0#m1GU8h_Ai1B12l>qU$SZb*CE%d&p+p^)nI`$H zu6;Gl6wOS1!a!p%ear8zyu=|uTC@Q^2(rq#{uX@*mCp*cGVLl{QLlemR%S6KIqF=+ zhN2zm7g*DQ8899q#{u6^!OSe6Ki5!ov|{NFJBOuXr!$9wp+S=MVG*`nx(@}5W|&nq z)%~u4|HBh!bNP!WzT)uxVFa80X3gqHDhe%0Ksz$TIxXKeZHp=n(YYMUxo;nmkumL6 z>bMl6%uT4dCan5=!3d6yU&36|Cn@@=u- z)ZV-Mpb0>UyE}X~yeY0~pKOG_(tIyDItc2M+f4hvgczMH(1S39MDYt<5xZ?E6k}-*vWE zdpWs3l09YG+q||g!|th(shKlQKizp$?|Pe=3(N)7yFO`;y!StE-!SW}ZO9EC?UN{< z^c!MMbUM&?oC|DgKD=6KtXhL(gpsPw_d{!02JSuw7SIo|ag-dRcbtQbZXXTafb_yK zC!59}@D0C+%Wl(te(0vLNGFp1L{A8Z6B|{zOoG5#nD~L-E&YnsLGU@7N3bl-{HvxE z8mSMW4BXvC>I&3U=y6{^Bm3o!Adc|-g3NS*xHl9}5X5QOclStZJK zd6*S))%npvR{hYl2x5#x$n?JHIY;4{yxj=f&D+b~cnbB8>*_0n-O5a$Q?8grPkyS? 
zVmEheXx}#vE}$Y?b4Q)eJx9GarRxB!}b+#(0>B7S8;Om3YOP&Ud1G&Nn9N~G-wU6$o zIiOj0motmOUqWsQu?ZLqscpMyl*9DcWu8*=#~8}M5TZ3MEdkDX)9+eoU&r&PW)6Y8 zM$h7Mjy=YoEOz+*`ONNAW%3<%KCRhyoI*T!Smq?z%`6PnjI7#{dQ?dgM&$tIb`3l2 z@1bF$>Ao5p=b5J*Ux~d?2fyXDzN1#f4KkF=r%yU!g z8sQr+vvqxacNn(=x<#F4=TMW_?siR8m#cEOCTBB>t4Z69Yv>*(9!Hw|amIbu zjByXe1IOHyiIwg9=_kzK=i5rT&89E7e=nBw$SYk#*ww29h;HQ$6t;qW5?I#-oN-?8 zZ9gvIud&&3*EW;I(6d&d@@`Ph8w5<3%YuMWmGy8L-`cI-N>`n@t&t^oMd)=5w00l& z&>p(2X6m-yqcdGeD;_>raQ0@;U7cG$?0dkH@Krq>g?^*==nUe5+Y)vzKLrIjNfwUK zUJYvndvhWZTPhlOrs4Zv3hlO zlZn?#Y=fu9Ue6YHuCdI!A{%NOfTrSsod> zZ*6I_8prju!gIPZQ-5=*88xI}V$xYDSfKPPq~|M=l#L#}How!1u*<`E800MTy0Ip3d?g4!*1o+%u&3u6qXo))fMASv!G z_C^wUdd{@Xm!T_rMAq6?{hhiwGn9%9hHJxLz6zMXk-LprXL9V4-Dy$`mCH$?t0RP8 zgi(V7luHymmlukK=d<~1cSlcKd8$#0@en~+a`-E1s@&DiJF~8JoS+2)f6i9a;08Vr zOOE@6wJP(B4i(SHv=*~H=`nKkJH3X>%D9ZG z*yUzfl=z-ozSGjZL0K!=@(Xv*``v<{jT3gZEHw8jOv5I!{SeJ6-Di)MQ}3o_@N4W|>EEK`F*aq%J8&*%G`ny@a zZXW`BeO|=6+K*%ju?2SMix9;=1e>TQXX?>hKqA1jn}zuSvmkqX7FK)v>2MT49kj zw0o2$-2#e7%tM!^G3AHk2_K$FYgT{Df>7;0vsi0o zevr!7UIM+2t%g6V6R_&()f01ijwo+`0nm!?bq1QEW#NdK#RR@g?bBD3zc!OaZXVa+ zyY`G`7+~Fs6Z5Vv?<68}x$p>5#Huh_^u*r_Z}YOu${X6B&f~2%-}0j(`O!HdPH{pC zG?u;ZbajyX%!zBy&avmrCbF^tl$+p?>yo#lSNUGpb~cLTP6bl?iDO~kz>3<*T3_wv zzd~9QU~`+@R=v~r=`XmY*Y1qUR>!_St3G>u)v^Ck9Cqaa%>~b)`C=DL@K77R5!8)+ z$2+^_xfh&RvMD0~Q9Af^QBV{A?d16QL`VADm;$}%XS)kEDjQc6o%Fv)oGPg$w-bJ#7ww z*++?j-`ttKame1;T5npVL|a20zQpWnn;Ok|R`*whm3U*30dHI^X4GOwP zg{_Hi?s4akE$?XuS8G}rXJkERMdYfg(#t6x`JJ9XL-EofEP7;gb-4ITuKt8a2tlZV z|4KJ2@Y{-AIm-hb)GJm z$6ok_BA{UOX_&}Iacp2+z4u;QX_pvlFe|J`pC^a&ha^Zo&1Aarr}qzK49j}J2O-aJsQAU@<9UEV8wC>z^A`+6LV`&@db;0E8nCUtNmd&0Y3g8QYrj>?p2DQU$8=XKdnZk23-t>+07YiDsu)lOJ^jGEzNmeC+roqB)KWcEl^~ znuXTiB=7N{aOJEsZ6V4g{gLEt`)g)xZ8PtJQ$<|?TB?l*=lz=qgS9iA)XXd~w)H3%&GhBW-{PB1Te4rt#N8`h^Bx=Qn6CGYCxCh~il>lWOe$F|{jeK;^3j|lkCO#0 zD)ge-WY+nzJUXeourk@S<(68GdPv>zl7Wlu8lq(MEaj0!Z*4Gjn%$}leEGR?ns$xh zs^jT8s*KrfqU1p_9>LYEEpw>o6Q>-}al3({>vnH)v~^**$5O7RmFahRI(KRCkgxqx zwnnta(@3}A+G2)%xmu0~OxQ3DiFU2bz98^fpx$;yPD!vZu%v43WAjV`ZA@D)pQi~n zhW?Psi_&eLbxJJ4SwD2n(`Fd@tRq#Yzxs;XZ2|tK&w}-ak8>QfbP?K^{N7uo_iuKT4 zcUzVO?&DJ3EyfZqWn zZ*O;VB??Bd`MEZaD|_`^%8NW^xP@H$H^hs`#e~&wy6?MNjPBpg(ag*}^o7jU>S~+w z2tC@O^XU4-IdV2jLo~E;0xOKY(FnT1MU-TwKQ3BPh0e^JIX9XAni*5-H0mXR^mfAM{+@)?sGAYwll4lThZw~8c3x#@=VTMhy@wnxhSq7n>t}G zZfEpT8qwQH1^*l! zV);ECc~)(ksEww31%L}4p)RA^Dq^=puNrbSiZ!%`l~0xYP40^d`9R7VFNT{2Y+d&k z=D*DuY!x)^ha?PjhF=4RM5{Ede_+Y+oT{jS@@L79EG24f%k4Y#=9PSDA$(pJ$*k?M zdX%{a!70jIZG07RU=cVSi*Sr$Au!NNfl0+@(y@ribk7F2OS63;e zmu0?4oV!v(O7UgOj;bQ9{^Uu8*R^loz`o>txN1~KbW%=UUO}GSn}BKOZg#X6j}Wzw9{M?lreOP}h|dr(@EwsPzQv4OynTa5~FyKD}Em*Ym}oZ=efMJcQ1I^{l;YefeVJuLGx{xi6X342e6^cw%_4GPu_E$PR}Y;5vlkP z8?8|PB)w^L@7z6&_aQWx44b6^okWJNbM?}U5D^)W+pNl(-OhLI4+EXf=YV=!1?|Ge zhT}*ZA4C21ovHJYX9zTAGT|Vj(3!Xe1@Yu{@6YJ?IOj81c*1>)pnjM>a&~up3U+&T zhVEC0`%X3xgPUHCjmnD-=@nV3J*b$Z=FgASz};X&c?p4H*bUiGsyGD>F4S}CVS-ZJ zy7Yn9w#}%^`lz7CXAJk$HkiTaAS_O7#at*Ih?U|i83@U3-v__s$#Ds&h+uB#pi;u? 
zEL#vaBLYuOu)QM~JR3{3i4(=RUPJdc_HGZKFrB0=eW&ZOxh%X`Y>VF~`IN1)D+!_% zRib1!pj)fw)2px^xrBev`BO-Rr4r##Due6D?#}EHKvGJ)B~7$=VNkOcL)gaH^`#RG zg{-!`#(HhWDv|xk<}JJCw4DC5^^&$VyO)GUlu#ceKlduxA@*{Dya|q;NXO3mDy!8i z4hx>2zPS*FEJ{t*BkA91C54M*@Af-yg$O$ko*E|1w|Dxqaq?t1BeNyor@q2N?zP$`O@OHSV1vvX)q^^_ZHqR4dZ5}lc2W}sb}`}$Uc%2nAO?8u_ci>mTc zcOr&nQ9<36&M)^U2$#1?-$`((;p>u##TNuUd^)Vv8bN&n=@S<7hiFCLfGGEeh{~_awTDr z%skh7Vy6kVo)0M59)I`#=7!r8pH7tp$KcP4>YH`6ksnyjdIsAqsWi8A$D+`KbA1>6 z{cn1FtTD|FDOjoS@e{0^^l?#cX*G*ij}^PVT`u6Ak6h~9IBG20dRSWp-($+O)X8(9 zaVJF5PFG#Vy*c@1KSwRn%%M4@Ig{W^z49c<@_Sq=Oc7?RvR<-K*~0>GX=Eokrv`b@ zLrjc>c$DSKNXqI+>8-D1b9lh@H69UsyT$YOb5a}wX$NG*ZV)1GJb$FhdF;bSLA*;6 zW0H$g17D@wT;WHJ+$)+{pt;UDySY=8&SRO!bx^lOLzh=vR;)Vo$}{z_i;~U5hncFU z7k5lX<$7o@5#<2dhjYL)40lMf703R3{-fyh&SW6v>mtv}^A6uPME%j$T=do233TJ1 zq>&H()%}yNGbhy~-$CpPv)%>Qy4qIb?R876U$JjZiky_25Z>*R=I37;n6P(*cp7BK z*M5)Rd*`&I1K#UKx(+UUgq-qZa{1Qe?K-R4yqKGkj^Vc+zk^gxu^C;142OqiLmkr% zoJNv36vM%+@m<;n*kOXa$}4LT7sg z1|^*=#wu30A>Y*Do4KBuGL0uPR3iN|B#}_`*6;>DQcZT5b8odY0cmSSK59AiavbB^ zm#MH)m+67WXSkw{mvGrdCW&oTB%re`u^dK6jQtUMbgVg6vNP%`DVF61?oV6p?kQKm z9nD&EqapI5JTzu~X6&Cv)A~%r_Ij`C?rmR>6SoU7iA&KRb4jX&uQ3to$Gsvs^?BtU zM`$KikGn15)6$7qwezn~?y$}$4(3Vc@pFI2hFT|3alB_zq#`hTteqsIRt%2cg?xQE zh$!Obdn9N&d-lwGBOnL0ZUnN$UPpfdSfH@E(Iub9VFubl79+Waz1|qEvWaDSUkS@I z(>A)XdK6g|zeEcl7|%R2YR*hdw646ih1;AGc&Eag zP3n42$53;`+{a}0*VcA$PZ(fkYm!?|nI%;?FA20I`9q^C!Jgx@9o!Jb7#@wMb^;Jj z%jd1h4i0F<%XmXE*5x&mj=9dUl_Z{T`T8{=(9MlPcC#)~;nqy5q^Fe=;wMfeGN*&~ zBz$&lVuJRZ(%R!nAL*4_epiw8qQQ>tZMk{&&EE8k@!Iq$a$cvhCUtlo?SR+#IG3Se zvLzxyAnoL{(u^DeyK7$O`(V5*FeWh$&;+VNsfA?@^onIh{cz^eQ5{5F$Mk*;%NWtJ@2P_HN_6g@=Yv8d02G}D;5^w8{><)DT^ zxko_ z)v6KVNX!0qva{<8`SDb*HcGo@xjz+lcs$45|@>e7SD(xI^y}( z7gtTuHtorX;TXe7Vo!a3JGSzrhn!=s*Ajjn`Onfu&DA*-iTSv>&zZGK92q83Io!HI z75}3V{0%p-zx4Xb%eUH2Wyi)}yi#0d0MaX@q(D1yWwU8;jIM%|2FC0oSTEloGh$x# zS#OG=q$EQJm|ft$Z`Q}!7epS_8M04is;jGde5&c1uVn|E8Zf9&tCz`z)$qsri@-`R zz8V~ZTR~@){S7Cag;%l^+2i?K5?pj6yV13gYW!uYPA^TKq~DpC0W#cIl1^59$ca01 zc8UBbm8Px_WC1O=Y5!Tw=64QYEl-X1+Qky4BfA*o39M`3(7;uCL*a^I`ZLaY9j9)3 z>&9?^EuvM#zP*m>?zP};uI1|-#5SC$W-^sfm@pIufJ+^kP;-S?6&vN}<*95ulhs+1 zmsJr@OL?u-QA%2)J*IOxud%!k)*`t2%m#z4F0fDpa=67oK=3UvFu5YY|tK>m9 zwkq&%oQ51b1yyk1Ue4Z}43UZ_-q1435zXuIJ#UA|+mly$Mp)d#&d5O>zF$NK=z{G# z$HixImuTNU{sQd&(ilwi$)I%m8#k!`Zy9zUNN;`7h%EW#N&K|Re_TC3b;@D$wL&zK z+z#3O%?=qjRUZS#97K_sc~@TN%T?gvAu+At@0d=*;Myclxp2Y@(cnj^8sTI1UwjvZc1cjc_d{+N(z48|Z z!S5U;-FocDB-Oq@ab^;9qQp)%6`pH1j3B+~(m6e64NKQwiUp1&_kZB}g>66P=R|ck zVU;Yy`)Gpy!^r&YUnVb((I0T9##+$-TzG~HAKn|56YJQY`R7I)rk|Ht02on?0(9>u zj3aECNY?2-Q>&F6_MZ!PxOxqo4VuY5WEKCI#9tN|_??^@@L1+idrj(J((d1%cI-F; zsDDG8JhEzkCdB_Xp&th);{$M-uB-_D23r5eD)@o{SP|IYhWPA-_kf{`UT9ux%|~TZ zguT=F^*_QWiDp>F%!pmA<=@SA|D)AMgjU1enCBeod#fHm!m6X4s~P2SyC{#u_NrUz zJ`SVWYg)OAUTuy90B4xF&hRtIp^=vGfti=}=24^G@QXj!IF9X6mI1{XYZTq3?gp?6 zqek};I^)+_I8Jz6~4~hbty0 zE0kRDh7Nm9HOzhprzN$~NEf293R~=Gnem>BT>=@fpL9J$?)rlaILfF{iQcnOt%b2* z0UGR&BD=qBncvGZ#1#b@Ov@^9YWMsQ?epe(ai55WP+qY8?WyA45@!*Mi9)Hf zl`nID3eGS+BHXD!fkl{CM}O}9e-xt8kq|p&(TqHa8M(K#$Mv04j7H`wr&vhT3yDv} zTjQRPLu2#jq@tj{g{)tdx8d$_yC*&`$C?%(dY6u`{!v~Wc82IUK4C1w0q%A3ChUN5 zor<&d9E&*~=ZqffiZsQQC7XWR7ta9f>fS6H*%6!Vo9gV99-41##wuE0ZqT;2U+Xj3 zMKh?EYSSzf4;_?j`ZzPvVrpK2JLj0f5%&>cF-t;=zJCLQrw>Y0{&7Gs5xHzt<^a}3~LNgiN9kr14U znF>KVzTBFAZ`Jj6neCX^Cgr4022N$qIoAt6lZ_riXZDUq_0)7RN9flDxOP;cyl#Qx zFvt4_JIyBQ@#h6KbBA8vk1*=YzlNS7zkJvFl`^%TxC{RCqA^?^F18POFJ9Xrhwup6V!ig{a~{y9(BzvDuOFuv zUe2}(_n@ai?^&*HUd8>LVN7oy@z8@l+EVkj87gd~F5^5^e8{SI#C`mCtI2QW5Lmbs zhj{a8?xd$rMZW?Lk z;Qe^*dh~!x>K8;sRDYY@vW< zsyKG=3xwzQfTT@(M7bj2YqB*MXoD;tm&h2j&#D=G?r*mW5KLFtjQo9O?QD|1ml||{ 
[GIT binary patch data omitted]

literal 0
HcmV?d00001

diff --git a/go.mod b/go.mod
new file mode 100644
index 00000000..8e882f12
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,153 @@
+module github.com/IBM-Blockchain/fabric-operator
+
+go 1.17
+
+require (
+	github.com/cloudflare/cfssl v1.4.1
+	github.com/docker/docker v20.10.12+incompatible
+	github.com/go-logr/logr v0.4.0
+	github.com/go-test/deep v1.0.2
+	github.com/gogo/protobuf v1.3.2
+	github.com/hyperledger/fabric v1.4.11
+	github.com/hyperledger/fabric-ca v1.5.3
+	github.com/hyperledger/fabric-protos-go v0.0.0-20200113171556-368e201877dd
+	github.com/imdario/mergo v0.3.12
+	github.com/lib/pq v1.8.0
+	github.com/maxbrunsfeld/counterfeiter/v6 v6.2.3
+	github.com/onsi/ginkgo v1.16.4
+	github.com/onsi/gomega v1.13.0
+	github.com/openshift/api v3.9.1-0.20190924102528-32369d4db2ad+incompatible
+	github.com/operator-framework/operator-lib v0.8.0
+	github.com/pkg/errors v0.9.1
+	github.com/spf13/viper v1.7.0
+	github.com/vrischmann/envconfig v1.3.0
+	go.uber.org/zap v1.17.0
+	gopkg.in/yaml.v2 v2.4.0
+	k8s.io/api v0.21.5
+	
k8s.io/apiextensions-apiserver v0.21.5 + k8s.io/apimachinery v0.21.5 + k8s.io/client-go v0.21.5 + k8s.io/code-generator v0.21.5 + sigs.k8s.io/controller-runtime v0.9.0 + sigs.k8s.io/yaml v1.2.0 +) + +require ( + cloud.google.com/go v0.59.0 // indirect + github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible // indirect + github.com/PuerkitoBio/purell v1.1.1 // indirect + github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect + github.com/Shopify/sarama v1.30.0 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/docker/go-connections v0.4.0 // indirect + github.com/docker/go-units v0.4.0 // indirect + github.com/eapache/go-resiliency v1.2.0 // indirect + github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect + github.com/eapache/queue v1.1.0 // indirect + github.com/emicklei/go-restful v2.9.5+incompatible // indirect + github.com/evanphx/json-patch v4.11.0+incompatible // indirect + github.com/felixge/httpsnoop v1.0.1 // indirect + github.com/fsnotify/fsnotify v1.4.9 // indirect + github.com/go-kit/kit v0.10.0 // indirect + github.com/go-logfmt/logfmt v0.5.0 // indirect + github.com/go-logr/zapr v0.4.0 // indirect + github.com/go-openapi/jsonpointer v0.19.3 // indirect + github.com/go-openapi/jsonreference v0.19.3 // indirect + github.com/go-openapi/spec v0.19.5 // indirect + github.com/go-openapi/swag v0.19.5 // indirect + github.com/go-sql-driver/mysql v1.5.0 // indirect + github.com/go-stack/stack v1.8.1 // indirect + github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/certificate-transparency-go v1.0.21 // indirect + github.com/google/go-cmp v0.5.6 // indirect + github.com/google/gofuzz v1.1.0 // indirect + github.com/google/uuid v1.1.2 // indirect + github.com/googleapis/gnostic v0.5.5 // indirect + github.com/gorilla/handlers v1.5.1 // indirect + github.com/gorilla/mux v1.8.0 // indirect + github.com/grantae/certinfo v0.0.0-20170412194111-59d56a35515b // indirect + github.com/hashicorp/go-uuid v1.0.2 // indirect + github.com/hashicorp/go-version v1.2.0 // indirect + github.com/hashicorp/golang-lru v0.5.4 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/hyperledger/fabric-amcl v0.0.0-20200424173818-327c9e2cf77a // indirect + github.com/hyperledger/fabric-lib-go v1.0.0 // indirect + github.com/jcmturner/aescts/v2 v2.0.0 // indirect + github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect + github.com/jcmturner/gofork v1.0.0 // indirect + github.com/jcmturner/gokrb5/v8 v8.4.2 // indirect + github.com/jcmturner/rpc/v2 v2.0.3 // indirect + github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548 // indirect + github.com/jmoiron/sqlx v1.3.4 // indirect + github.com/json-iterator/go v1.1.10 // indirect + github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46 // indirect + github.com/klauspost/compress v1.13.6 // indirect + github.com/magiconair/properties v1.8.1 // indirect + github.com/mailru/easyjson v0.7.0 // indirect + github.com/mattn/go-sqlite3 v1.14.9 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/miekg/pkcs11 v1.0.3 // indirect + github.com/mitchellh/mapstructure v1.3.3 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.1 // 
indirect + github.com/nxadm/tail v1.4.8 // indirect + github.com/op/go-logging v0.0.0-20160315200505-970db520ece7 // indirect + github.com/pelletier/go-toml v1.2.0 // indirect + github.com/pierrec/lz4 v2.6.1+incompatible // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_golang v1.11.0 // indirect + github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/common v0.10.0 // indirect + github.com/prometheus/procfs v0.2.0 // indirect + github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect + github.com/spf13/afero v1.2.2 // indirect + github.com/spf13/cast v1.3.1 // indirect + github.com/spf13/jwalterweatherman v1.0.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/stretchr/testify v1.7.0 // indirect + github.com/subosito/gotenv v1.2.0 // indirect + github.com/sykesm/zap-logfmt v0.0.4 // indirect + github.com/weppos/publicsuffix-go v0.5.0 // indirect + github.com/zmap/zcrypto v0.0.0-20190729165852-9051775e6a2e // indirect + github.com/zmap/zlint v0.0.0-20190806154020-fd021b4cfbeb // indirect + go.uber.org/atomic v1.7.0 // indirect + go.uber.org/multierr v1.6.0 // indirect + golang.org/x/crypto v0.0.0-20210920023735-84f357641f63 // indirect + golang.org/x/mod v0.4.2 // indirect + golang.org/x/net v0.0.0-20210917221730-978cfadd31cf // indirect + golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d // indirect + golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 // indirect + golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d // indirect + golang.org/x/text v0.3.7 // indirect + golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba // indirect + golang.org/x/tools v0.1.2 // indirect + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect + gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a // indirect + google.golang.org/grpc v1.29.1 // indirect + google.golang.org/protobuf v1.26.0 // indirect + gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/ini.v1 v1.51.0 // indirect + gopkg.in/ldap.v2 v2.5.1 // indirect + gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect + gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect + k8s.io/component-base v0.21.5 // indirect + k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027 // indirect + k8s.io/klog/v2 v2.8.0 // indirect + k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 // indirect + k8s.io/utils v0.0.0-20210527160623-6fdb442a123b // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.1.2 // indirect +) + +replace ( + github.com/go-kit/kit => github.com/go-kit/kit v0.8.0 // Needed for fabric-ca + github.com/gorilla/handlers => github.com/gorilla/handlers v1.4.0 // Needed for fabric-ca + github.com/gorilla/mux => github.com/gorilla/mux v1.7.3 // Needed for fabric-ca + github.com/hyperledger/fabric => github.com/hyperledger/fabric v0.0.0-20191027202024-115c7a2205a6 + github.com/prometheus/client_golang => github.com/prometheus/client_golang v0.9.0 // Needed for fabric-ca +) diff --git a/go.sum b/go.sum new file mode 100644 index 00000000..749d2c99 --- /dev/null +++ b/go.sum @@ -0,0 +1,1025 @@ +bitbucket.org/liamstask/goose v0.0.0-20150115234039-8488cc47d90c/go.mod h1:hSVuE3qU7grINVSwrmzHfpg9k87ALBk+XaualNyUzI4= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod 
h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.59.0 h1:BM3svUDU3itpc2m5cu5wCyThIYNDlFlts9GASw31GW8= +cloud.google.com/go v0.59.0/go.mod h1:qJxNOVCRTxHfwLhvDxxSI9vQc1zI59b9pEglp1Iv60E= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= 
+github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0= +github.com/GeertJohan/go.rice v1.0.0/go.mod h1:eH6gbSOAUv07dQuZVnBmoDP8mgsM1rtixis4Tib9if0= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible h1:1G1pk05UrOh0NlF1oeaaix1x8XzrfjIDK47TY0Zehcw= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/sarama v1.30.0 h1:TOZL6r37xJBDEMLx4yjB77jxbZYXPaDow08TSK6vIL0= +github.com/Shopify/sarama v1.30.0/go.mod h1:zujlQQx1kzHsh4jfV1USnptCQrHAEZ2Hk8fTKCulPVs= +github.com/Shopify/toxiproxy/v2 v2.1.6-0.20210914104332-15ea381dcdae h1:ePgznFqEG1v3AjMklnK8H7BSc++FDSo7xfK9K7Af+0Y= +github.com/Shopify/toxiproxy/v2 v2.1.6-0.20210914104332-15ea381dcdae/go.mod h1:/cvHQkZ1fst0EmZnA5dFtiQdWCNCFYzb+uE2vqVgvx0= +github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver/v4 v4.0.0/go.mod 
h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/certifi/gocertifi v0.0.0-20180118203423-deb3ae2ef261/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudflare/backoff v0.0.0-20161212185259-647f3cdfc87a/go.mod h1:rzgs2ZOiguV6/NpiDgADjRLPNyZlApIWxKpkT+X8SdY= +github.com/cloudflare/cfssl v1.4.1 h1:vScfU2DrIUI9VPHBVeeAQ0q5A+9yshO1Gz+3QoUQiKw= +github.com/cloudflare/cfssl v1.4.1/go.mod h1:KManx/OJPb5QY+y0+o/898AMcM128sF0bURvoVUSjTo= +github.com/cloudflare/go-metrics v0.0.0-20151117154305-6a9aea36fb41/go.mod h1:eaZPlJWD+G9wseg1BuRXlHnjntPMrywMsyxf+LTOdP4= +github.com/cloudflare/redoctober v0.0.0-20171127175943-746a508df14c/go.mod h1:6Se34jNoqrd8bTxrmJB2Bg2aoZ2CdSXonils9NsiNgo= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/daaku/go.zipexe v1.0.0/go.mod h1:z8IiR6TsVLEYKwXAoE/I+8ys/sDkgTzSL0CLnGVd57E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/docker/docker v20.10.12+incompatible h1:CEeNmFM0QZIsJCZKMkZx0ZcahTiewkrgiwfYD+dfl1U= +github.com/docker/docker v20.10.12+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q= +github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs= +github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= +github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ= +github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest 
v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/getsentry/raven-go v0.0.0-20180121060056-563b81fc02b7/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-bindata/go-bindata/v3 v3.1.3/go.mod h1:1/zrpXsLD8YDIbhZRqXzm1Ghc7NhEvIN9+Z6R5/xH4I= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= +github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/zapr v0.4.0 h1:uc1uML3hRYL9/ZZPdgHS/n8Nzo+eaYL/Efxkkamf7OM= +github.com/go-logr/zapr v0.4.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/spec v0.19.5 h1:Xm0Ao53uqnk9QE/LlYV5DEU09UAgpliA85QoT9LzqPw= +github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= 
+github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= +github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= +github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/gobuffalo/flect v0.2.2/go.mod h1:vmkQwuZYhN5Pc4ljYQZzP+1sq+NEkK+lh20jmEmX3jc= +github.com/goccy/go-yaml v1.8.1/go.mod h1:wS4gNoLalDSJxo/SpngzPQ2BN4uuZVLCmbM4S3vd4+Y= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod 
h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/certificate-transparency-go v1.0.21 h1:Yf1aXowfZ2nuboBsg7iYGLmwsOARdV86pfH3g95wXmE= +github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.0.0/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= +github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/handlers v1.4.0 h1:XulKRWSQK5uChr4pEgSE4Tc/OcmnU9GJuSwdog/tZsA= +github.com/gorilla/handlers v1.4.0/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grantae/certinfo v0.0.0-20170412194111-59d56a35515b h1:NGgE5ELokSf2tZ/bydyDUKrvd/jP8lrAoPNeBuMOTOk= +github.com/grantae/certinfo v0.0.0-20170412194111-59d56a35515b/go.mod h1:zT/uzhdQGTqlwTq7Lpbj3JoJQWfPfIJ1tE0OidAmih8= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod 
h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/hyperledger/fabric v0.0.0-20191027202024-115c7a2205a6 h1:Nsiq4GTvhs5tpMYt/9wv3Er0Se7oG0rZlI75+e4gvXc= +github.com/hyperledger/fabric v0.0.0-20191027202024-115c7a2205a6/go.mod h1:tGFAOCT696D3rG0Vofd2dyWYLySHlh0aQjf7Q1HAju0= +github.com/hyperledger/fabric-amcl v0.0.0-20200424173818-327c9e2cf77a h1:JAKZdGuUIjVmES0X31YUD7UqMR2rz/kxLluJuGvsXPk= +github.com/hyperledger/fabric-amcl v0.0.0-20200424173818-327c9e2cf77a/go.mod h1:X+DIyUsaTmalOpmpQfIvFZjKHQedrURQ5t4YqquX7lE= +github.com/hyperledger/fabric-ca v1.5.3 h1:fwSYKFN+bEpagULVOOmQmZVc42FUbJI8OLj4aaeC5yY= +github.com/hyperledger/fabric-ca v1.5.3/go.mod h1:yT+T08R5hhetWcg9C00pRj8/0IxnzYy7kh/IqGNB47w= +github.com/hyperledger/fabric-lib-go v1.0.0 h1:UL1w7c9LvHZUSkIvHTDGklxFv2kTeva1QI2emOVc324= +github.com/hyperledger/fabric-lib-go v1.0.0/go.mod h1:H362nMlunurmHwkYqR5uHL2UDWbQdbfz74n8kbCFsqc= +github.com/hyperledger/fabric-protos-go v0.0.0-20200113171556-368e201877dd h1:dv8PcTulQ2/DEio+3NzUVy17A1YYt+0VaXnQ4FnjAKE= +github.com/hyperledger/fabric-protos-go v0.0.0-20200113171556-368e201877dd/go.mod h1:xVYTjK4DtZRBxZ2D9aE4y6AbLaPwue2o/criQyQbVD0= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= +github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= 
+github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= +github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= +github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= +github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= +github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= +github.com/jcmturner/gokrb5/v8 v8.4.2 h1:6ZIM6b/JJN0X8UM43ZOM6Z4SJzla+a/u7scXFJzodkA= +github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc= +github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= +github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548 h1:dYTbLf4m0a5u0KLmPfB6mgxbcV7588bOCx79hxa5Sr4= +github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548/go.mod h1:hGT6jSUVzF6no3QaDSMLGLEHtHSBSefs+MgcDWnmhmo= +github.com/jmoiron/sqlx v0.0.0-20180124204410-05cef0741ade/go.mod h1:IiEW3SEiiErVyFdH8NTuWjSifiEQKUoyK3LNqr2kCHU= +github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= +github.com/jmoiron/sqlx v1.3.4 h1:wv+0IJZfL5z0uZoUjlpKgHkgaFSYD+r9CfrXjEXsO7w= +github.com/jmoiron/sqlx v1.3.4/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ= +github.com/joefitzgerald/rainbow-reporter v0.1.0 h1:AuMG652zjdzI0YCCnXAqATtRBpGXMcAnrajcaTrSeuo= +github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kisielk/sqlstruct v0.0.0-20150923205031-648daed35d49/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE= +github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46 h1:veS9QfglfvqAw2e+eeNT/SbGySq8ajECXJ9e4fPoLhY= +github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE= +github.com/kisom/goutils v1.1.0/go.mod 
h1:+UBTfd78habUYWFbNWTJNG+jNG/i/lGURakr4A/yNRw= +github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/go-gypsy v0.0.0-20160905020020-08cad365cd28/go.mod h1:T/T7jsxVqf9k/zYOqbgNAsANsjxTd1Yq3htjDhQ1H0c= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/lib/pq v0.0.0-20180201184707-88edab080323/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.8.0 h1:9xohqzkUwzR4Ga4ivdTcawVS89YSDVxXMa3xJX3cGzg= +github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-runewidth 
v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v1.14.5/go.mod h1:WVKg1VTActs4Qso6iwGbiFih2UIHo0ENGwNd0Lj+XmI= +github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v1.14.9 h1:10HX2Td0ocZpYEjhilsuo6WWtUqttj2Kb0KtD86/KYA= +github.com/mattn/go-sqlite3 v1.14.9/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/maxbrunsfeld/counterfeiter/v6 v6.2.3 h1:z1lXirM9f9WTcdmzSZahKh/t+LCqPiiwK2/DB1kLlI4= +github.com/maxbrunsfeld/counterfeiter/v6 v6.2.3/go.mod h1:1ftk08SazyElaaNvmqAfZWGwJzshjCfBXDLoQtPAMNk= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/pkcs11 v1.0.3 h1:iMwmD7I5225wv84WxIG/bmxz9AXjWvTWIbM/TYHvWtw= +github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/mikefarah/yq/v3 v3.0.0-20201202084205-8846255d1c37/go.mod h1:dYWq+UWoFCDY1TndvFUQuhBbIYmZpjreC8adEAx93zE= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.3.3 h1:SzB1nHZ2Xi+17FP0zVQBHIZqvwRN9408fJO8h+eeNA8= +github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mreiferson/go-httpclient v0.0.0-20160630210159-31f0106b4474/go.mod h1:OQA4XLvDbMgS8P0CevmM4m9Q3Jq4phKUzcocxuGJ5m8= 
+github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229/go.mod h1:0aYXnNPJ8l7uZxf45rWW1a/uME32OF0rhiYGNQ2oF2E= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= +github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= +github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak= +github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7 h1:lDH9UUVJtmYCjyT0CI4q8xvlXPxeZ0gYCVvWbmPlp88= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/openshift/api v3.9.1-0.20190924102528-32369d4db2ad+incompatible h1:6il8W875Oq9vycPkRV5TteLP9IfMEX3lyOl5yN+CtdI= +github.com/openshift/api v3.9.1-0.20190924102528-32369d4db2ad+incompatible/go.mod h1:dh9o4Fs58gpFXGSYfnVxGR9PnV53I8TW84pQaJDdGiY= +github.com/operator-framework/api v0.10.0/go.mod h1:tV0BUNvly7szq28ZPBXhjp1Sqg5yHCOeX19ui9K4vjI= +github.com/operator-framework/operator-lib v0.8.0 h1:w3y2/VEQXYui7DPAe0DAIEmTO22VDRzl2qRSxVDqeCg= +github.com/operator-framework/operator-lib v0.8.0/go.mod h1:2Z32GTTJUz2/f+OKcoJXsVnAyRwcXx7mGmQsdhIAIIE= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml v1.2.0 
h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= +github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.5.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/prometheus/client_golang v0.9.0 h1:tXuTFVHC03mW0D+Ua1Q2d1EAVqLTuggX50V0VLICCzY= +github.com/prometheus/client_golang v0.9.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= +github.com/sclevine/spec v1.4.0 h1:z/Q9idDcay5m5irkZ28M7PtQM4aOISzOpj4bUPkDee8= +github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.3.0/go.mod 
h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= +github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= +github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= 
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/sykesm/zap-logfmt v0.0.4 h1:U2WzRvmIWG1wDLCFY3sz8UeEmsdHQjHFNlIdmroVFaI= +github.com/sykesm/zap-logfmt v0.0.4/go.mod h1:AuBd9xQjAe3URrWT1BBDk2v2onAZHkZkWRMiYZXiZWA= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/vrischmann/envconfig v1.3.0 h1:4XIvQTXznxmWMnjouj0ST5lFo/WAYf5Exgl3x82crEk= +github.com/vrischmann/envconfig v1.3.0/go.mod h1:bbvxFYJdRSpXrhS63mBFtKJzkDiNkyArOLXtY6q0kuI= +github.com/weppos/publicsuffix-go v0.4.0/go.mod h1:z3LCPQ38eedDQSwmsSRW4Y7t2L8Ln16JPQ02lHAdn5k= +github.com/weppos/publicsuffix-go v0.5.0 h1:rutRtjBJViU/YjcI5d80t4JAVvDltS6bciJg2K1HrLU= +github.com/weppos/publicsuffix-go v0.5.0/go.mod h1:z3LCPQ38eedDQSwmsSRW4Y7t2L8Ln16JPQ02lHAdn5k= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= +github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= +github.com/zmap/rc2 v0.0.0-20131011165748-24b9757f5521/go.mod h1:3YZ9o3WnatTIZhuOtot4IcUfzoKVjUHqu6WALIyI0nE= +github.com/zmap/zcertificate v0.0.0-20180516150559-0e3d58b1bac4/go.mod h1:5iU54tB79AMBcySS0R2XIyZBAVmeHranShAFELYx7is= +github.com/zmap/zcrypto v0.0.0-20190729165852-9051775e6a2e h1:mvOa4+/DXStR4ZXOks/UsjeFdn5O5JpLUtzqk9U8xXw= +github.com/zmap/zcrypto v0.0.0-20190729165852-9051775e6a2e/go.mod h1:w7kd3qXHh8FNaczNjslXqvFQiv5mMWRXlL9klTUAHc8= +github.com/zmap/zlint v0.0.0-20190806154020-fd021b4cfbeb h1:vxqkjztXSaPVDc8FQCdHTaejm2x747f6yPbnu1h2xkg= +github.com/zmap/zlint v0.0.0-20190806154020-fd021b4cfbeb/go.mod h1:29UiAJNsiVdvTBFCJW8e3q6dcDbOoPkhMgttOSCIMMY= +go.etcd.io/bbolt 
v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.12.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto 
v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210920023735-84f357641f63 h1:kETrAMYZq6WVGPa8IIixL0CaEcIUNi+1WX7grUoi3y8= +golang.org/x/crypto v0.0.0-20210920023735-84f357641f63/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= 
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210917221730-978cfadd31cf h1:R150MpwJIv1MpS0N/pc+NhTM8ajzvlmxlY5OYsrevXQ= +golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 h1:SrN+KX8Art/Sf4HNj6Zcz06G7VEz+7w9tdXTPOZ7+l4= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 
+golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools 
v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200301222351-066e0c02454c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.2 h1:kRBLX7v7Af8W7Gdbbc908OJcdgtK8bOz9Uaj8/F1ACA= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= +gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= 
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto 
v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200623002339-fbb79eadd5eb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a h1:pOwg4OoaRYScjmR4LlLgdtnyoHYTSAVhhqe5uPdpII8= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf 
v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM= +gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v9 v9.30.0/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ldap.v2 v2.5.1 h1:wiu0okdNfjlBzg6UWvd1Hn8Y+Ux17/u/4nlk4CQr6tU= +gopkg.in/ldap.v2 v2.5.1/go.mod h1:oI0cpe/D7HRtBQl8aTg+ZmzFUAvu4lsv3eLXMLGFxWk= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473/go.mod h1:N1eN2tsCx0Ydtgjl4cqmbRCsY4/+z4cYDeqwZTk6zog= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= 
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.21.1/go.mod h1:FstGROTmsSHBarKc8bylzXih8BLNYTiS3TZcsoEDg2s= +k8s.io/api v0.21.5 h1:9zp3SslPRB+rqxhGKqqTo6VsN3HX0Ype1nWV6UQQ+Sk= +k8s.io/api v0.21.5/go.mod h1:Un8C5Hemo2r3MfPOjZvwQQ9KkBbiTBUCGrjlivo9uJ0= +k8s.io/apiextensions-apiserver v0.21.1/go.mod h1:KESQFCGjqVcVsZ9g0xX5bacMjyX5emuWcS2arzdEouA= +k8s.io/apiextensions-apiserver v0.21.5 h1:sCUpiB47ba59J57ZsqOvoxD3voc2nnR+sylAzHIwI8w= +k8s.io/apiextensions-apiserver v0.21.5/go.mod h1:iiakfVazpXLW8OkF2sH/p9XGgfE7XFSQuZFJ10QlXB4= +k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= +k8s.io/apimachinery v0.21.5 h1:56bnsHcUNboSCbD779GGi4Lh5kHTDFUoDrnHbhLTiaw= +k8s.io/apimachinery v0.21.5/go.mod h1:3PfBV+4PPXNs0aueD+7fHcGyhdkFFYqXeshQtsKCi+4= +k8s.io/apiserver v0.21.1/go.mod h1:nLLYZvMWn35glJ4/FZRhzLG/3MPxAaZTgV4FJZdr+tY= +k8s.io/apiserver v0.21.5/go.mod h1:0bWmrAx3dxUUFSEw71U91Si5obhIvBcAmf8oVZUO58E= +k8s.io/client-go v0.21.1/go.mod h1:/kEw4RgW+3xnBGzvp9IWxKSNA+lXn3A7AuH3gdOAzLs= +k8s.io/client-go v0.21.5 h1:zkVidiWVgciPKYqWpMFMjCUF+4rRXcfkKoyQS1Ue21k= +k8s.io/client-go v0.21.5/go.mod h1:EUornVlr3rBrPKXUoMPNggJdEQmvFNMpYO3Kb6432kw= +k8s.io/code-generator v0.21.1/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q= +k8s.io/code-generator v0.21.5 h1:7X6dJG4hzKFHChYpP02iF0XrXhenqQHc76QoKYzDZfI= +k8s.io/code-generator v0.21.5/go.mod h1:0K1k6o2ef8JD/j8LF3ZuqWLGFMHvO5psNzLLmxf7ZVE= +k8s.io/component-base v0.21.1/go.mod h1:NgzFZ2qu4m1juby4TnrmpR8adRk6ka62YdH5DkIIyKA= +k8s.io/component-base v0.21.5 h1:icFqcFDrO9S+FQpGohzVm6qce9vlo131K0r3NhElxiQ= +k8s.io/component-base v0.21.5/go.mod h1:UyRaqQfPkBL/haEFaMWgVQvtom5TqAT+jqlFGlh6LuU= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027 h1:Uusb3oh8XcdzDF/ndlI4ToKTYVlkCSJP39SRY2mfRAw= +k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts= 
+k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 h1:vEx13qjvaZ4yfObSSXW7BrMc/KQBBT/Jyee8XtLf4x0= +k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210527160623-6fdb442a123b h1:MSqsVQ3pZvPGTqCjptfimO2WjG7A9un2zcpiHkA6M/s= +k8s.io/utils v0.0.0-20210527160623-6fdb442a123b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/controller-runtime v0.9.0 h1:ZIZ/dtpboPSbZYY7uUz2OzrkaBTOThx2yekLtpGB+zY= +sigs.k8s.io/controller-runtime v0.9.0/go.mod h1:TgkfvrhhEw3PlI0BRL/5xM+89y3/yc0ZDfdbTl84si8= +sigs.k8s.io/controller-tools v0.6.0/go.mod h1:baRMVPrctU77F+rfAuH2uPqW93k6yQnZA2dhUOr7ihc= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/integration/actions/ca/ca_suite_test.go b/integration/actions/ca/ca_suite_test.go new file mode 100644 index 00000000..02bc9899 --- /dev/null +++ b/integration/actions/ca/ca_suite_test.go @@ -0,0 +1,174 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package ca_test + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "testing" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/onsi/gomega/gexec" + + "github.com/IBM-Blockchain/fabric-operator/integration" + "github.com/IBM-Blockchain/fabric-operator/integration/helper" + ibpclient "github.com/IBM-Blockchain/fabric-operator/pkg/client" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + + "k8s.io/client-go/kubernetes" +) + +func TestCa(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Ca Suite") +} + +const ( + ccTarFile = "gocc.tar.gz" + + FabricBinaryVersion = "2.2.3" + FabricCABinaryVersion = "1.5.1" + + IBPCAS = "ibpcas" + + pathToRoot = "../../../" +) + +var ( + wd string // Working directory of test + namespace string + domain string + kclient *kubernetes.Clientset + ibpCRClient *ibpclient.IBPClient + colorIndex uint + testFailed bool + caHost string + tlsBytes []byte + + org1ca *helper.CA +) + +var _ = BeforeSuite(func() { + SetDefaultEventuallyTimeout(420 * time.Second) + SetDefaultEventuallyPollingInterval(time.Second) + + var err error + domain = os.Getenv("DOMAIN") + if domain == "" { + domain = integration.TestAutomation1IngressDomain + } + + wd, err = os.Getwd() + Expect(err).NotTo(HaveOccurred()) + fmt.Fprintf(GinkgoWriter, "Working directory: %s\n", wd) + + cleanupFiles() + + cfg := &integration.Config{ + OperatorServiceAccount: "../../../config/rbac/service_account.yaml", + OperatorRole: "../../../config/rbac/role.yaml", + OperatorRoleBinding: "../../../config/rbac/role_binding.yaml", + OperatorDeployment: "../../../testdata/deploy/operator.yaml", + OrdererSecret: "../../../testdata/deploy/orderer/secret.yaml", + PeerSecret: "../../../testdata/deploy/peer/secret.yaml", + ConsoleTLSSecret: "../../../testdata/deploy/console/tlssecret.yaml", + } + + namespace, kclient, ibpCRClient, err = integration.Setup(GinkgoWriter, cfg, "ca-actions", pathToRoot) + Expect(err).NotTo(HaveOccurred()) + + downloadBinaries() + + CreateNetwork() +}) + +var _ = AfterSuite(func() { + + if strings.ToLower(os.Getenv("SAVE_TEST")) == "true" { + return + } + + integration.Cleanup(GinkgoWriter, kclient, namespace) + cleanupFiles() +}) + +func CreateNetwork() { + By("starting CA pod", func() { + org1ca = Org1CA() + helper.CreateCA(ibpCRClient, org1ca.CR) + + Eventually(org1ca.PodIsRunning).Should((Equal(true))) + }) + + profile, err := org1ca.ConnectionProfile() + Expect(err).NotTo(HaveOccurred()) + + tlsBytes, err = util.Base64ToBytes(profile.TLS.Cert) + Expect(err).NotTo(HaveOccurred()) + + By("performing CA health check", func() { + Eventually(func() bool { + url := fmt.Sprintf("https://%s/cainfo", org1ca.Address()) + fmt.Fprintf(GinkgoWriter, "Waiting for CA health check to pass for '%s' at url: %s\n", org1ca.Name, url) + return org1ca.HealthCheck(url, tlsBytes) + }).Should(Equal(true)) + }) + + org1ca.TLSToFile(tlsBytes) +} + +func downloadBinaries() { + os.Setenv("FABRIC_VERSION", FabricBinaryVersion) + os.Setenv("FABRIC_CA_VERSION", FabricCABinaryVersion) + path := pathToRoot + "scripts/download_binaries.sh" + sess, err := helper.StartSession( + helper.GetCommand(helper.AbsPath(wd, path)), + "Download Binaries", + ) + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) +} + +func cleanupFiles() { + os.RemoveAll(filepath.Join(wd, Org1CA().Name)) + os.RemoveAll(filepath.Join(wd, ccTarFile)) +} + +func Org1CA() *helper.CA { + cr := helper.Org1CACR(namespace, domain) + + return &helper.CA{ + Domain: domain, + Name: cr.Name, + Namespace: namespace, + WorkingDir: wd, + CR: cr, + CRClient: ibpCRClient, + KClient: kclient, + NativeResourcePoller: 
integration.NativeResourcePoller{ + Name: cr.Name, + Namespace: namespace, + Client: kclient, + }, + } +} diff --git a/integration/actions/ca/ca_test.go b/integration/actions/ca/ca_test.go new file mode 100644 index 00000000..783b19cc --- /dev/null +++ b/integration/actions/ca/ca_test.go @@ -0,0 +1,405 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package ca_test + +import ( + "bytes" + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/base64" + "encoding/json" + "encoding/pem" + "fmt" + "math/big" + "time" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/integration" + "github.com/IBM-Blockchain/fabric-operator/integration/helper" + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/ca/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + "github.com/IBM-Blockchain/fabric-operator/pkg/util/pointer" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = Describe("trigger CA actions", func() { + AfterEach(func() { + // Set flag if a test fails + if CurrentGinkgoTestDescription().Failed { + testFailed = true + } + }) + + Context("renew TLS cert set to true", func() { + var ( + expiringCA *helper.CA + ibpca *current.IBPCA + ) + + Context("TLS certificate", func() { + var ( + err error + cert, key []byte + ) + + BeforeEach(func() { + key, cert, err = GenSelfSignedCert(time.Hour * 48) + Expect(err).NotTo(HaveOccurred()) + + certB64 := util.BytesToBase64(cert) + keyB64 := util.BytesToBase64(key) + + override := &v1.ServerConfig{ + TLS: v1.ServerTLSConfig{ + Enabled: pointer.True(), + CertFile: certB64, + KeyFile: keyB64, + }, + } + overrideBytes, err := json.Marshal(override) + Expect(err).NotTo(HaveOccurred()) + + expiringCA = CAWithOverrides(json.RawMessage(overrideBytes)) + helper.CreateCA(ibpCRClient, expiringCA.CR) + + Eventually(expiringCA.PodIsRunning).Should((Equal(true))) + }) + + When("TLS cert renew action is set to true", func() { + BeforeEach(func() { + patch := func(o client.Object) { + ibpca = o.(*current.IBPCA) + ibpca.Spec.Action.Renew.TLSCert = true + } + + err := integration.ResilientPatch(ibpCRClient, expiringCA.Name, namespace, IBPCAS, 3, &current.IBPCA{}, patch) + Expect(err).NotTo(HaveOccurred()) + + Eventually(expiringCA.PodIsRunning).Should((Equal(true))) + }) + + It("renews", func() { + By("backing up old crypto", func() { + Eventually(func() bool { + backup, err := GetBackup("tls", expiringCA.CR.Name) + if err != nil { + return false + } + + if len(backup.List) > 0 { + return
backup.List[len(backup.List)-1].SignCerts == base64.StdEncoding.EncodeToString(cert) + } + + return false + }).Should(Equal(true)) + }) + + By("updating crypto secret with new TLS Cert", func() { + Eventually(func() bool { + crypto, err := kclient.CoreV1().Secrets(namespace). + Get(context.TODO(), fmt.Sprintf("%s-ca-crypto", expiringCA.CR.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + return bytes.Equal(cert, crypto.Data["tls-cert.pem"]) + }).Should(Equal(false)) + }) + + By("updating operations cert to match new TLS cert", func() { + crypto, err := kclient.CoreV1().Secrets(namespace). + Get(context.TODO(), fmt.Sprintf("%s-ca-crypto", expiringCA.CR.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + Expect(bytes.Equal( + crypto.Data["operations-cert.pem"], + crypto.Data["tls-cert.pem"], + )).To(Equal(true)) + }) + + By("refreshing the TLS certificate with expiration value of plus 10 years", func() { + crypto, err := kclient.CoreV1().Secrets(namespace). + Get(context.TODO(), fmt.Sprintf("%s-ca-crypto", expiringCA.CR.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + newTLSCert := crypto.Data["tls-cert.pem"] + newCert, err := util.GetCertificateFromPEMBytes(newTLSCert) + Expect(err).NotTo(HaveOccurred()) + Expect(newCert.NotAfter.Year()).To(Equal(time.Now().Add(time.Hour * 87600).Year())) + }) + + By("updating crypto secret with new TLS Key", func() { + Eventually(func() bool { + crypto, err := kclient.CoreV1().Secrets(namespace). + Get(context.TODO(), fmt.Sprintf("%s-ca-crypto", expiringCA.CR.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + return bytes.Equal(key, crypto.Data["tls-key.pem"]) + }).Should(Equal(false)) + }) + + By("updating operations key to match new TLS Key", func() { + crypto, err := kclient.CoreV1().Secrets(namespace). + Get(context.TODO(), fmt.Sprintf("%s-ca-crypto", expiringCA.CR.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + Expect(bytes.Equal( + crypto.Data["operations-key.pem"], + crypto.Data["tls-key.pem"], + )).To(Equal(true)) + }) + + By("updating connection profile with new TLS cert", func() { + Eventually(func() bool { + cm, err := kclient.CoreV1(). + ConfigMaps(namespace). + Get(context.TODO(), + fmt.Sprintf("%s-connection-profile", expiringCA.CR.Name), + metav1.GetOptions{}, + ) + Expect(err).NotTo(HaveOccurred()) + + profileBytes := cm.BinaryData["profile.json"] + connectionProfile := ¤t.CAConnectionProfile{} + err = json.Unmarshal(profileBytes, connectionProfile) + Expect(err).NotTo(HaveOccurred()) + + crypto, err := kclient.CoreV1().Secrets(namespace). 
+ Get(context.TODO(), fmt.Sprintf("%s-ca-crypto", expiringCA.CR.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + return bytes.Equal([]byte(connectionProfile.TLS.Cert), crypto.Data["tls-key.pem"]) + }).Should(Equal(false)) + }) + + By("setting renew flag back to false after renewal", func() { + Eventually(func() bool { + result := ibpCRClient.Get().Namespace(namespace).Resource(IBPCAS).Name(expiringCA.Name).Do(context.TODO()) + ibpca := &current.IBPCA{} + result.Into(ibpca) + + return ibpca.Spec.Action.Renew.TLSCert + }).Should(Equal(false)) + }) + }) + }) + }) + }) + + Context("restart", func() { + var ( + podName string + ca *current.IBPCA + ) + + BeforeEach(func() { + Eventually(func() int { + return len(org1ca.GetPods()) + }).Should(Equal(1)) + + podName = org1ca.GetPods()[0].Name + + result := ibpCRClient.Get().Namespace(namespace).Resource(IBPCAS).Name(org1ca.Name).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + + ca = &current.IBPCA{} + result.Into(ca) + }) + + When("spec has restart flag set to true", func() { + BeforeEach(func() { + ca.Spec.Action.Restart = true + }) + + It("performs restart action", func() { + bytes, err := json.Marshal(ca) + Expect(err).NotTo(HaveOccurred()) + + result := ibpCRClient.Put().Namespace(namespace).Resource(IBPCAS).Name(org1ca.Name).Body(bytes).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + + Eventually(org1ca.PodIsRunning).Should((Equal(true))) + + By("restarting ca pod", func() { + Eventually(func() bool { + pods := org1ca.GetPods() + if len(pods) == 0 { + return false + } + + newPodName := pods[0].Name + if newPodName != podName { + return true + } + + return false + }).Should(Equal(true)) + }) + + By("setting restart flag back to false after restart", func() { + Eventually(func() bool { + result := ibpCRClient.Get().Namespace(namespace).Resource(IBPCAS).Name(org1ca.Name).Do(context.TODO()) + ca := &current.IBPCA{} + result.Into(ca) + + return ca.Spec.Action.Restart + }).Should(Equal(false)) + }) + }) + }) + }) + +}) + +func CAWithOverrides(rawMessage json.RawMessage) *helper.CA { + cr := &current.IBPCA{ + ObjectMeta: metav1.ObjectMeta{ + Name: "org2ca", + Namespace: namespace, + }, + Spec: current.IBPCASpec{ + License: current.License{ + Accept: true, + }, + ImagePullSecrets: []string{"regcred"}, + Images: &current.CAImages{ + CAImage: integration.CaImage, + CATag: integration.CaTag, + CAInitImage: integration.InitImage, + CAInitTag: integration.InitTag, + }, + Resources: &current.CAResources{ + CA: &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("50m"), + corev1.ResourceMemory: resource.MustParse("100M"), + corev1.ResourceEphemeralStorage: resource.MustParse("100M"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("50m"), + corev1.ResourceMemory: resource.MustParse("100M"), + corev1.ResourceEphemeralStorage: resource.MustParse("1G"), + }, + }, + }, + Zone: "select", + Region: "select", + Domain: domain, + ConfigOverride: &current.ConfigOverride{ + CA: &runtime.RawExtension{Raw: rawMessage}, + }, + FabricVersion: integration.FabricCAVersion, + }, + } + + return &helper.CA{ + Domain: domain, + Name: cr.Name, + Namespace: namespace, + WorkingDir: wd, + CR: cr, + CRClient: ibpCRClient, + KClient: kclient, + NativeResourcePoller: integration.NativeResourcePoller{ + Name: cr.Name, + Namespace: namespace, + Client: kclient, + }, + } +} + +// GenSelfSignedCert generates a self-signed TLS certificate that expires after the given duration. +func GenSelfSignedCert(expiresIn time.Duration) ([]byte, 
[]byte, error) { + priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + return nil, nil, errors.Wrap(err, "failed to generate key") + } + + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + return nil, nil, errors.Wrap(err, "failed to generate serial number") + } + + notBefore := time.Now() + notAfter := notBefore.Add(expiresIn) + + template := x509.Certificate{ + SerialNumber: serialNumber, + Issuer: pkix.Name{ + Country: []string{"US"}, + Province: []string{"North Carolina"}, + Locality: []string{"Durham"}, + Organization: []string{"IBM"}, + OrganizationalUnit: []string{"Blockchain"}, + }, + Subject: pkix.Name{ + Country: []string{"US"}, + Province: []string{"North Carolina"}, + Locality: []string{"Durham"}, + Organization: []string{"IBM"}, + OrganizationalUnit: []string{"Blockchain"}, + }, + NotBefore: notBefore, + NotAfter: notAfter, + } + + derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv) + if err != nil { + return nil, nil, errors.Wrap(err, "failed to create certificate") + } + + keyBytes, err := x509.MarshalECPrivateKey(priv) + if err != nil { + return nil, nil, errors.Wrap(err, "failed to marshal key") + } + + certPEM := &pem.Block{Type: "CERTIFICATE", Bytes: derBytes} + keyPEM := &pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes} + + certBytes := pem.EncodeToMemory(certPEM) + keyBytes = pem.EncodeToMemory(keyPEM) + + return keyBytes, certBytes, nil +} + +func GetBackup(certType, name string) (*common.Backup, error) { + backupSecret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), fmt.Sprintf("%s-crypto-backup", name), metav1.GetOptions{}) + if err != nil { + return nil, err + } + + backup := &common.Backup{} + key := fmt.Sprintf("%s-backup.json", certType) + err = json.Unmarshal(backupSecret.Data[key], backup) + if err != nil { + return nil, err + } + + return backup, nil +} diff --git a/integration/actions/orderer/orderer_suite_test.go b/integration/actions/orderer/orderer_suite_test.go new file mode 100644 index 00000000..7dada9e0 --- /dev/null +++ b/integration/actions/orderer/orderer_suite_test.go @@ -0,0 +1,269 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package orderer_test + +import ( + "fmt" + "net/url" + "os" + "path/filepath" + "strings" + "testing" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/onsi/gomega/gexec" + + "github.com/IBM-Blockchain/fabric-operator/integration" + "github.com/IBM-Blockchain/fabric-operator/integration/helper" + ibpclient "github.com/IBM-Blockchain/fabric-operator/pkg/client" + baseorderer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + + "k8s.io/client-go/kubernetes" +) + +func TestOrderer(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Orderer Suite") +} + +const ( + ccTarFile = "gocc.tar.gz" + + FabricBinaryVersion = "2.2.3" + FabricCABinaryVersion = "1.5.1" + + ordererUsername = "orderer" + + IBPCAS = "ibpcas" + IBPORDERERS = "ibporderers" + + pathToRoot = "../../../" +) + +var ( + wd string // Working directory of test + namespace string + domain string + kclient *kubernetes.Clientset + ibpCRClient *ibpclient.IBPClient + colorIndex uint + testFailed bool + caHost string + tlsBytes []byte + + org1ca *helper.CA + orderer *helper.Orderer +) + +var _ = BeforeSuite(func() { + SetDefaultEventuallyTimeout(480 * time.Second) + SetDefaultEventuallyPollingInterval(time.Second) + + var err error + domain = os.Getenv("DOMAIN") + if domain == "" { + domain = integration.TestAutomation1IngressDomain + } + + wd, err = os.Getwd() + Expect(err).NotTo(HaveOccurred()) + fmt.Fprintf(GinkgoWriter, "Working directory: %s\n", wd) + + cleanupFiles() + + cfg := &integration.Config{ + OperatorServiceAccount: "../../../config/rbac/service_account.yaml", + OperatorRole: "../../../config/rbac/role.yaml", + OperatorRoleBinding: "../../../config/rbac/role_binding.yaml", + OperatorDeployment: "../../../testdata/deploy/operator.yaml", + OrdererSecret: "../../../testdata/deploy/orderer/secret.yaml", + PeerSecret: "../../../testdata/deploy/peer/secret.yaml", + ConsoleTLSSecret: "../../../testdata/deploy/console/tlssecret.yaml", + } + + namespace, kclient, ibpCRClient, err = integration.Setup(GinkgoWriter, cfg, "orderer-actions", pathToRoot) + Expect(err).NotTo(HaveOccurred()) + + downloadBinaries() + + CreateNetwork() +}) + +var _ = AfterSuite(func() { + if strings.ToLower(os.Getenv("SAVE_TEST")) == "true" { + return + } + + integration.Cleanup(GinkgoWriter, kclient, namespace) + cleanupFiles() +}) + +func CreateNetwork() { + By("starting CA pod", func() { + org1ca = Org1CA() + helper.CreateCA(ibpCRClient, org1ca.CR) + + Eventually(org1ca.PodIsRunning).Should((Equal(true))) + }) + + profile, err := org1ca.ConnectionProfile() + Expect(err).NotTo(HaveOccurred()) + + tlsBytes, err = util.Base64ToBytes(profile.TLS.Cert) + Expect(err).NotTo(HaveOccurred()) + + By("performing CA health check", func() { + Eventually(func() bool { + url := fmt.Sprintf("https://%s/cainfo", org1ca.Address()) + fmt.Fprintf(GinkgoWriter, "Waiting for CA health check to pass for '%s' at url: %s\n", org1ca.Name, url) + return org1ca.HealthCheck(url, tlsBytes) + }).Should(Equal(true)) + }) + + org1ca.TLSToFile(tlsBytes) + + caURL, err := url.Parse(profile.Endpoints.API) + Expect(err).NotTo(HaveOccurred()) + caHost = strings.Split(caURL.Host, ":")[0] + + By("enrolling ca admin", func() { + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, org1ca.Name, "org1ca-admin")) + sess, err := helper.StartSession(org1ca.Enroll("admin", "adminpw"), "Enroll CA Admin") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + }) + + By("registering orderer identity", func() { + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, org1ca.Name, "org1ca-admin")) + sess, err := 
helper.StartSession(org1ca.Register(ordererUsername, "ordererpw", "orderer"), "Register Orderer Identity") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, org1ca.Name, "org1ca-admin")) + sess, err = helper.StartSession(org1ca.Register("orderer2", "ordererpw2", "orderer"), "Register Orderer Identity") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + }) + + By("starting Orderer pod", func() { + orderer = GetOrderer(profile.TLS.Cert, caHost) + err = helper.CreateOrderer(ibpCRClient, orderer.CR) + Expect(err).NotTo(HaveOccurred()) + }) + + Eventually(orderer.Nodes[0].PodIsRunning).Should((Equal(true))) +} + +func downloadBinaries() { + os.Setenv("FABRIC_VERSION", FabricBinaryVersion) + os.Setenv("FABRIC_CA_VERSION", FabricCABinaryVersion) + sess, err := helper.StartSession( + helper.GetCommand(helper.AbsPath(wd, pathToRoot+"scripts/download_binaries.sh")), + "Download Binaries", + ) + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) +} + +func cleanupFiles() { + os.RemoveAll(filepath.Join(wd, Org1CA().Name)) + os.RemoveAll(filepath.Join(wd, GetOrderer("", "").Nodes[0].Name)) + os.RemoveAll(filepath.Join(wd, ccTarFile)) +} + +func Org1CA() *helper.CA { + cr := helper.Org1CACR(namespace, domain) + + return &helper.CA{ + Domain: domain, + Name: cr.Name, + Namespace: namespace, + WorkingDir: wd, + CR: cr, + CRClient: ibpCRClient, + KClient: kclient, + NativeResourcePoller: integration.NativeResourcePoller{ + Name: cr.Name, + Namespace: namespace, + Client: kclient, + }, + } +} + +func GetOrderer(tlsCert, caHost string) *helper.Orderer { + cr, err := helper.OrdererCR(namespace, domain, ordererUsername, tlsCert, caHost) + Expect(err).NotTo(HaveOccurred()) + + nodes := []helper.Orderer{ + helper.Orderer{ + Name: cr.Name + "node1", + Namespace: namespace, + CR: cr.DeepCopy(), + NodeName: fmt.Sprintf("%s%s%d", cr.Name, baseorderer.NODE, 1), + NativeResourcePoller: integration.NativeResourcePoller{ + Name: cr.Name + "node1", + Namespace: namespace, + Client: kclient, + }, + }, + helper.Orderer{ + Name: cr.Name + "node2", + Namespace: namespace, + CR: cr.DeepCopy(), + NodeName: fmt.Sprintf("%s%s%d", cr.Name, baseorderer.NODE, 2), + NativeResourcePoller: integration.NativeResourcePoller{ + Name: cr.Name + "node2", + Namespace: namespace, + Client: kclient, + }, + }, + helper.Orderer{ + Name: cr.Name + "node3", + Namespace: namespace, + CR: cr.DeepCopy(), + NodeName: fmt.Sprintf("%s%s%d", cr.Name, baseorderer.NODE, 3), + NativeResourcePoller: integration.NativeResourcePoller{ + Name: cr.Name + "node3", + Namespace: namespace, + Client: kclient, + }, + }, + } + + nodes[0].CR.ObjectMeta.Name = cr.Name + "node1" + nodes[1].CR.ObjectMeta.Name = cr.Name + "node2" + nodes[2].CR.ObjectMeta.Name = cr.Name + "node3" + + return &helper.Orderer{ + Name: cr.Name, + Namespace: namespace, + CR: cr, + NodeName: fmt.Sprintf("%s-%s%d", cr.Name, baseorderer.NODE, 1), + NativeResourcePoller: integration.NativeResourcePoller{ + Name: cr.Name, + Namespace: namespace, + Client: kclient, + }, + Nodes: nodes, + CRClient: ibpCRClient, + } +} diff --git a/integration/actions/orderer/orderer_test.go b/integration/actions/orderer/orderer_test.go new file mode 100644 index 00000000..9d6570b5 --- /dev/null +++ b/integration/actions/orderer/orderer_test.go @@ -0,0 +1,527 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * 
Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package orderer_test + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "sigs.k8s.io/controller-runtime/pkg/client" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/integration" + "github.com/IBM-Blockchain/fabric-operator/integration/helper" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common" + + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var _ = Describe("trigger orderer actions", func() { + AfterEach(func() { + // Set flag if a test fails + if CurrentGinkgoTestDescription().Failed { + testFailed = true + } + }) + + var ( + node1 helper.Orderer + node2 helper.Orderer + node3 helper.Orderer + + podNameNode1 string + podNameNode2 string + podNameNode3 string + + ibpordererNode1 *current.IBPOrderer + ) + + BeforeEach(func() { + node1 = orderer.Nodes[0] + node2 = orderer.Nodes[1] + node3 = orderer.Nodes[2] + + Eventually(node1.PodIsRunning, time.Second*60, time.Second*2).Should((Equal(true))) + + // NOTE: Need to keep same operator config for duration of test to ensure that the correct + // reason string is passed into operator-restart-config CM. + // integration.ClearOperatorConfig(kclient, namespace) + + Eventually(func() int { return len(node1.GetPods()) }).Should(Equal(1)) + Eventually(func() int { return len(node2.GetPods()) }).Should(Equal(1)) + Eventually(func() int { return len(node3.GetPods()) }).Should(Equal(1)) + + podNameNode1 = node1.GetPods()[0].Name + podNameNode2 = node2.GetPods()[0].Name + podNameNode3 = node3.GetPods()[0].Name + + result := ibpCRClient.Get().Namespace(namespace). + Resource(IBPORDERERS). + Name(node1.Name). 
+ Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + + ibpordererNode1 = ¤t.IBPOrderer{} + result.Into(ibpordererNode1) + }) + + Context("spec has restart flag set to true", func() { + It("performs restart action", func() { + patch := func(o client.Object) { + ibporderer := o.(*current.IBPOrderer) + ibporderer.Spec.Action.Restart = true + } + + err := integration.ResilientPatch(ibpCRClient, node1.Name, namespace, IBPORDERERS, 3, ¤t.IBPOrderer{}, patch) + Expect(err).NotTo(HaveOccurred()) + + Eventually(node1.PodIsRunning).Should((Equal(true))) + + By("restarting orderer pods", func() { + Eventually(func() bool { + pods := node1.GetRunningPods() + if len(pods) == 0 { + return false + } + + newPodName := pods[0].Name + if newPodName != podNameNode1 { + return true + } + + return false + }).Should(Equal(true)) + }) + + By("setting restart flag back to false after restart", func() { + Eventually(func() bool { + result := ibpCRClient.Get().Namespace(namespace).Resource(IBPORDERERS).Name(node1.Name).Do(context.TODO()) + ibporderer := ¤t.IBPOrderer{} + result.Into(ibporderer) + + return ibporderer.Spec.Action.Restart + }).Should(Equal(false)) + }) + }) + }) + + Context("spec has ecert reenroll flag set to true", func() { + var ( + ecert, ekey []byte + + commonAssertions = func() { + By("restarting orderer pods", func() { + Eventually(func() bool { + pods := node1.GetRunningPods() + if len(pods) != 1 { + return false + } + + newPodName := pods[0].Name + if newPodName != podNameNode1 { + return true + } + + return false + }).Should(Equal(true)) + }) + + By("backing up old signcert", func() { + backup := GetBackup("ecert", node1.Name) + Expect(len(backup.List)).NotTo(Equal(0)) + Expect(backup.List[len(backup.List)-1].SignCerts).To(Equal(base64.StdEncoding.EncodeToString(ecert))) + }) + + By("updating ecert signcert secret", func() { + updatedEcertSecret, err := kclient.CoreV1().Secrets(namespace). + Get(context.TODO(), fmt.Sprintf("ecert-%s-signcert", node1.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(bytes.Equal(ecert, updatedEcertSecret.Data["cert.pem"])).To(Equal(false)) + }) + } + ) + + BeforeEach(func() { + ecertSecret, err := kclient.CoreV1().Secrets(namespace). + Get(context.TODO(), fmt.Sprintf("ecert-%s-signcert", node1.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + ecert = ecertSecret.Data["cert.pem"] + + ecertSecret, err = kclient.CoreV1().Secrets(namespace). + Get(context.TODO(), fmt.Sprintf("ecert-%s-keystore", node1.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + ekey = ecertSecret.Data["key.pem"] + }) + + When("requesting a new key", func() { + It("gets a new key and certificate", func() { + patch := func(o client.Object) { + ibporderer := o.(*current.IBPOrderer) + ibporderer.Spec.Action.Reenroll.EcertNewKey = true + } + + err := integration.ResilientPatch(ibpCRClient, + node1.Name, + namespace, + IBPORDERERS, + 3, + ¤t.IBPOrderer{}, + patch) + Expect(err).NotTo(HaveOccurred()) + + commonAssertions() + + By("generating a new key", func() { + updatedEcertKey, err := kclient.CoreV1().Secrets(namespace). + Get(context.TODO(), fmt.Sprintf("ecert-%s-keystore", node1.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(bytes.Equal(ekey, updatedEcertKey.Data["key.pem"])).To(Equal(false)) + }) + + By("setting reenroll flag back to false after restart", func() { + ibporderer := ¤t.IBPOrderer{} + Eventually(func() bool { + result := ibpCRClient.Get().Namespace(namespace).Resource(IBPORDERERS). 
+ Name(node1.Name).Do(context.TODO()) + result.Into(ibporderer) + + return ibporderer.Spec.Action.Reenroll.EcertNewKey + }).Should(Equal(false)) + }) + }) + }) + + When("reusing existing key", func() { + It("gets a new certificate", func() { + patch := func(o client.Object) { + ibporderer := o.(*current.IBPOrderer) + ibporderer.Spec.Action.Reenroll.Ecert = true + } + + err := integration.ResilientPatch(ibpCRClient, + node1.Name, + namespace, + IBPORDERERS, + 3, + ¤t.IBPOrderer{}, + patch) + Expect(err).NotTo(HaveOccurred()) + + commonAssertions() + + By("not generating a new key", func() { + updatedEcertKey, err := kclient.CoreV1().Secrets(namespace). + Get(context.TODO(), fmt.Sprintf("ecert-%s-keystore", node1.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(bytes.Equal(ekey, updatedEcertKey.Data["key.pem"])).To(Equal(true)) + }) + + By("setting reenroll flag back to false after restart", func() { + ibporderer := ¤t.IBPOrderer{} + Eventually(func() bool { + result := ibpCRClient.Get().Namespace(namespace).Resource(IBPORDERERS). + Name(node1.Name).Do(context.TODO()) + result.Into(ibporderer) + + return ibporderer.Spec.Action.Reenroll.Ecert + }).Should(Equal(false)) + }) + }) + }) + }) + + Context("spec has ecert enroll flag set to true", func() { + var ( + ecert []byte + ecertKey []byte + ) + + BeforeEach(func() { + ecertSecret, err := kclient.CoreV1(). + Secrets(namespace). + Get(context.TODO(), fmt.Sprintf("ecert-%s-signcert", node1.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + ecertKeySecret, err := kclient.CoreV1(). + Secrets(namespace). + Get(context.TODO(), fmt.Sprintf("ecert-%s-keystore", node1.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + ecert = ecertSecret.Data["cert.pem"] + ecertKey = ecertKeySecret.Data["key.pem"] + }) + + It("generates new crypto", func() { + patch := func(o client.Object) { + ibporderer := o.(*current.IBPOrderer) + ibporderer.Spec.Action.Enroll.Ecert = true + } + + err := integration.ResilientPatch(ibpCRClient, node1.Name, namespace, IBPORDERERS, 3, ¤t.IBPOrderer{}, patch) + Expect(err).NotTo(HaveOccurred()) + + By("backing up old crypto", func() { + Eventually(func() bool { + backup := GetBackup("ecert", node1.Name) + Expect(len(backup.List)).NotTo(Equal(0)) + return backup.List[len(backup.List)-1].SignCerts == base64.StdEncoding.EncodeToString(ecert) && + backup.List[len(backup.List)-1].KeyStore == base64.StdEncoding.EncodeToString(ecertKey) + }).Should(Equal(true)) + }) + + By("updating ecert signcert secret", func() { + Eventually(func() bool { + updatedEcertSecret, err := kclient.CoreV1(). + Secrets(namespace). + Get(context.TODO(), fmt.Sprintf("ecert-%s-signcert", node1.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + return bytes.Equal(ecert, updatedEcertSecret.Data["cert.pem"]) + }).Should(Equal(false)) + }) + + By("updating ecert key secret", func() { + Eventually(func() bool { + updatedEcertSecret, err := kclient.CoreV1(). + Secrets(namespace). 
+ Get(context.TODO(), fmt.Sprintf("ecert-%s-keystore", node1.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + return bytes.Equal(ecertKey, updatedEcertSecret.Data["key.pem"]) + }).Should(Equal(false)) + }) + + By("setting enroll flag back to false after restart", func() { + Eventually(func() bool { + result := ibpCRClient.Get().Namespace(namespace).Resource(IBPORDERERS).Name(node1.Name).Do(context.TODO()) + ibporderer := ¤t.IBPOrderer{} + result.Into(ibporderer) + + return ibporderer.Spec.Action.Enroll.Ecert + }).Should(Equal(false)) + }) + }) + }) + + Context("spec has tlscert reenroll flag set to true", func() { + var ( + tlsCert, tlsKey []byte + + commonAssertions = func() { + By("restarting orderer pods", func() { + Eventually(func() bool { + pods := node2.GetRunningPods() + if len(pods) != 1 { + return false + } + + newPodName := pods[0].Name + if newPodName != podNameNode2 { + return true + } + + return false + }).Should(Equal(true)) + }) + + By("updating tls signcert secret", func() { + updatedTLSSecret, err := kclient.CoreV1().Secrets(namespace). + Get(context.TODO(), fmt.Sprintf("tls-%s-signcert", node2.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(bytes.Equal(tlsCert, updatedTLSSecret.Data["cert.pem"])).To(Equal(false)) + }) + } + ) + + BeforeEach(func() { + tlsSecret, err := kclient.CoreV1().Secrets(namespace). + Get(context.TODO(), fmt.Sprintf("tls-%s-signcert", node2.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + tlsCert = tlsSecret.Data["cert.pem"] + + tlsSecret, err = kclient.CoreV1().Secrets(namespace). + Get(context.TODO(), fmt.Sprintf("tls-%s-keystore", node2.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + tlsKey = tlsSecret.Data["key.pem"] + }) + + When("requesting a new key", func() { + It("gets a new key and certificate", func() { + patch := func(o client.Object) { + ibporderer := o.(*current.IBPOrderer) + ibporderer.Spec.Action.Reenroll.TLSCertNewKey = true + } + + err := integration.ResilientPatch( + ibpCRClient, + node2.Name, + namespace, + IBPORDERERS, + 3, + ¤t.IBPOrderer{}, + patch) + Expect(err).NotTo(HaveOccurred()) + + commonAssertions() + + By("generating a new key", func() { + updatedEcertKey, err := kclient.CoreV1().Secrets(namespace). + Get(context.TODO(), fmt.Sprintf("tls-%s-keystore", node2.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(bytes.Equal(tlsKey, updatedEcertKey.Data["key.pem"])).To(Equal(false)) + }) + + By("setting reenroll flag back to false after restart", func() { + ibporderer := ¤t.IBPOrderer{} + Eventually(func() bool { + result := ibpCRClient.Get().Namespace(namespace).Resource(IBPORDERERS). + Name(node2.Name).Do(context.TODO()) + result.Into(ibporderer) + + return ibporderer.Spec.Action.Reenroll.TLSCertNewKey + }).Should(Equal(false)) + }) + }) + }) + + When("reusing existing key", func() { + It("gets a new certificate", func() { + patch := func(o client.Object) { + ibporderer := o.(*current.IBPOrderer) + ibporderer.Spec.Action.Reenroll.TLSCert = true + } + + err := integration.ResilientPatch( + ibpCRClient, + node2.Name, + namespace, + IBPORDERERS, + 3, + ¤t.IBPOrderer{}, + patch) + Expect(err).NotTo(HaveOccurred()) + + commonAssertions() + + By("not generating a new key", func() { + updatedEcertKey, err := kclient.CoreV1().Secrets(namespace). 
+ Get(context.TODO(), fmt.Sprintf("tls-%s-keystore", node2.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(bytes.Equal(tlsKey, updatedEcertKey.Data["key.pem"])).To(Equal(true)) + }) + + By("setting reenroll flag back to false after restart", func() { + ibporderer := ¤t.IBPOrderer{} + Eventually(func() bool { + result := ibpCRClient.Get().Namespace(namespace).Resource(IBPORDERERS). + Name(node2.Name).Do(context.TODO()) + result.Into(ibporderer) + + return ibporderer.Spec.Action.Reenroll.TLSCert + }).Should(Equal(false)) + }) + }) + }) + }) + + Context("spec has tlscert enroll flag set to true", func() { + var ( + tls []byte + ) + + BeforeEach(func() { + tlsSecret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), fmt.Sprintf("tls-%s-signcert", node3.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + tls = tlsSecret.Data["cert.pem"] + }) + + It("gets a new certificate", func() { + patch := func(o client.Object) { + ibporderer := o.(*current.IBPOrderer) + ibporderer.Spec.Action.Enroll.TLSCert = true + } + + err := integration.ResilientPatch(ibpCRClient, node3.Name, namespace, IBPORDERERS, 3, ¤t.IBPOrderer{}, patch) + Expect(err).NotTo(HaveOccurred()) + + By("restarting orderer pods", func() { + Eventually(func() bool { + pods := node3.GetPods() + if len(pods) != 1 { + return false + } + + newPodName := pods[0].Name + if newPodName != podNameNode3 { + return true + } + + return false + }).Should(Equal(true)) + }) + + By("backing up old signcert", func() { + backup := GetBackup("tls", node3.Name) + Expect(len(backup.List)).NotTo(Equal(0)) + Expect(backup.List[len(backup.List)-1].SignCerts).To(Equal(base64.StdEncoding.EncodeToString(tls))) + }) + + By("updating tls signcert secret", func() { + updatedTLSSecret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), fmt.Sprintf("tls-%s-signcert", node3.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + Expect(bytes.Equal(tls, updatedTLSSecret.Data["cert.pem"])).To(Equal(false)) + }) + + By("setting reenroll flag back to false after restart", func() { + Eventually(func() bool { + result := ibpCRClient.Get().Namespace(namespace).Resource(IBPORDERERS).Name(node3.Name).Do(context.TODO()) + ibporderer := ¤t.IBPOrderer{} + result.Into(ibporderer) + + return ibporderer.Spec.Action.Enroll.TLSCert + }).Should(Equal(false)) + }) + }) + }) + +}) + +func GetBackup(certType, name string) *common.Backup { + backupSecret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), fmt.Sprintf("%s-crypto-backup", name), metav1.GetOptions{}) + if err != nil { + Expect(k8serrors.IsNotFound(err)).To(Equal(true)) + return &common.Backup{} + } + + backup := &common.Backup{} + key := fmt.Sprintf("%s-backup.json", certType) + err = json.Unmarshal(backupSecret.Data[key], backup) + Expect(err).NotTo(HaveOccurred()) + + return backup +} diff --git a/integration/actions/peer/peer_suite_test.go b/integration/actions/peer/peer_suite_test.go new file mode 100644 index 00000000..82ac606e --- /dev/null +++ b/integration/actions/peer/peer_suite_test.go @@ -0,0 +1,265 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package peer_test + +import ( + "encoding/base64" + "fmt" + "io/ioutil" + "net/url" + "os" + "path/filepath" + "strings" + "testing" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/onsi/gomega/gexec" + + "github.com/IBM-Blockchain/fabric-operator/integration" + "github.com/IBM-Blockchain/fabric-operator/integration/helper" + ibpclient "github.com/IBM-Blockchain/fabric-operator/pkg/client" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + + "k8s.io/client-go/kubernetes" +) + +func TestPeer(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Peer Suite") +} + +const ( + ccTarFile = "gocc.tar.gz" + + FabricBinaryVersion = "2.2.3" + FabricCABinaryVersion = "1.5.1" + + peerAdminUsername = "peer-admin" + peerUsername = "peer" + + IBPCAS = "ibpcas" + IBPPEERS = "ibppeers" + + pathToRoot = "../../../" +) + +var ( + wd string // Working directory of test + namespace string + domain string + kclient *kubernetes.Clientset + ibpCRClient *ibpclient.IBPClient + colorIndex uint + testFailed bool + caHost string + tlsBytes []byte + + org1ca *helper.CA + org1peer *helper.Peer +) + +var _ = BeforeSuite(func() { + SetDefaultEventuallyTimeout(420 * time.Second) + SetDefaultEventuallyPollingInterval(time.Second) + + var err error + + domain = os.Getenv("DOMAIN") + if domain == "" { + domain = integration.TestAutomation1IngressDomain + } + + wd, err = os.Getwd() + Expect(err).NotTo(HaveOccurred()) + fmt.Fprintf(GinkgoWriter, "Working directory: %s\n", wd) + + cleanupFiles() + + cfg := &integration.Config{ + OperatorServiceAccount: "../../../config/rbac/service_account.yaml", + OperatorRole: "../../../config/rbac/role.yaml", + OperatorRoleBinding: "../../../config/rbac/role_binding.yaml", + OperatorDeployment: "../../../testdata/deploy/operator.yaml", + OrdererSecret: "../../../testdata/deploy/orderer/secret.yaml", + PeerSecret: "../../../testdata/deploy/peer/secret.yaml", + ConsoleTLSSecret: "../../../testdata/deploy/console/tlssecret.yaml", + } + + namespace, kclient, ibpCRClient, err = integration.Setup(GinkgoWriter, cfg, "peer-actions", pathToRoot) + Expect(err).NotTo(HaveOccurred()) + + downloadBinaries() + + CreateNetwork() +}) + +var _ = AfterSuite(func() { + + if strings.ToLower(os.Getenv("SAVE_TEST")) == "true" { + return + } + + integration.Cleanup(GinkgoWriter, kclient, namespace) + + cleanupFiles() +}) + +func CreateNetwork() { + By("starting CA pod", func() { + org1ca = Org1CA() + helper.CreateCA(ibpCRClient, org1ca.CR) + + Eventually(org1ca.PodIsRunning).Should((Equal(true))) + }) + + profile, err := org1ca.ConnectionProfile() + Expect(err).NotTo(HaveOccurred()) + + tlsBytes, err = util.Base64ToBytes(profile.TLS.Cert) + Expect(err).NotTo(HaveOccurred()) + + By("performing CA health check", func() { + Eventually(func() bool { + url := fmt.Sprintf("https://%s/cainfo", org1ca.Address()) + fmt.Fprintf(GinkgoWriter, "Waiting for CA health check to pass for '%s' at url: %s\n", org1ca.Name, url) + return org1ca.HealthCheck(url, tlsBytes) + }).Should(Equal(true)) + }) + + org1ca.TLSToFile(tlsBytes) + + caURL, err := 
url.Parse(profile.Endpoints.API) + Expect(err).NotTo(HaveOccurred()) + caHost = strings.Split(caURL.Host, ":")[0] + + By("enrolling ca admin", func() { + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, org1ca.Name, "org1ca-admin")) + sess, err := helper.StartSession(org1ca.Enroll("admin", "adminpw"), "Enroll CA Admin") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + }) + + By("registering peer identity", func() { + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, org1ca.Name, "org1ca-admin")) + sess, err := helper.StartSession(org1ca.Register(peerUsername, "peerpw", "peer"), "Register User") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, org1ca.Name, "org1ca-admin")) + sess, err = helper.StartSession(org1ca.Register("peer2", "peerpw2", "peer"), "Register User") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + }) + + By("registering and enrolling peer admin", func() { + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, org1ca.Name, "org1ca-admin")) + sess, err := helper.StartSession(org1ca.Register(peerAdminUsername, "peer-adminpw", "admin"), "Register Peer Admin") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, "org1peer", peerAdminUsername)) + sess, err = helper.StartSession(org1ca.Enroll(peerAdminUsername, "peer-adminpw"), "Enroll Peer Admin") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, "org1peer", peerAdminUsername+"2")) + sess, err = helper.StartSession(org1ca.Enroll(peerAdminUsername, "peer-adminpw"), "Enroll Second Peer Admin") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + }) + + adminCertBytes, err := ioutil.ReadFile( + filepath.Join( + wd, + "org1peer", + peerAdminUsername, + "msp", + "signcerts", + "cert.pem", + ), + ) + Expect(err).NotTo(HaveOccurred()) + adminCertB64 := base64.StdEncoding.EncodeToString(adminCertBytes) + + By("starting Peer pod", func() { + org1peer = Org1Peer(profile.TLS.Cert, caHost, adminCertB64) + err = helper.CreatePeer(ibpCRClient, org1peer.CR) + Expect(err).NotTo(HaveOccurred()) + }) + + Eventually(org1peer.PodIsRunning).Should((Equal(true))) +} + +func downloadBinaries() { + os.Setenv("FABRIC_VERSION", FabricBinaryVersion) + os.Setenv("FABRIC_CA_VERSION", FabricCABinaryVersion) + sess, err := helper.StartSession( + helper.GetCommand(helper.AbsPath(wd, pathToRoot+"scripts/download_binaries.sh")), + "Download Binaries", + ) + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) +} + +func cleanupFiles() { + os.RemoveAll(filepath.Join(wd, Org1CA().Name)) + os.RemoveAll(filepath.Join(wd, Org1Peer("", "", "").Name)) + os.RemoveAll(filepath.Join(wd, ccTarFile)) +} + +func Org1CA() *helper.CA { + cr := helper.Org1CACR(namespace, domain) + + return &helper.CA{ + Domain: domain, + Name: cr.Name, + Namespace: namespace, + WorkingDir: wd, + CR: cr, + CRClient: ibpCRClient, + KClient: kclient, + NativeResourcePoller: integration.NativeResourcePoller{ + Name: cr.Name, + Namespace: namespace, + Client: kclient, + }, + } +} + +func Org1Peer(tlsCert, caHost, adminCert string) *helper.Peer { + cr, err := helper.Org1PeerCR(namespace, domain, peerUsername, tlsCert, caHost, adminCert) + Expect(err).NotTo(HaveOccurred()) + + return &helper.Peer{ + Domain: domain, + Name: cr.Name, + 
Namespace: namespace, + WorkingDir: wd, + CR: cr, + CRClient: ibpCRClient, + KClient: kclient, + NativeResourcePoller: integration.NativeResourcePoller{ + Name: cr.Name, + Namespace: namespace, + Client: kclient, + }, + } +} diff --git a/integration/actions/peer/peer_test.go b/integration/actions/peer/peer_test.go new file mode 100644 index 00000000..bed58697 --- /dev/null +++ b/integration/actions/peer/peer_test.go @@ -0,0 +1,595 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package peer_test + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "fmt" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "sigs.k8s.io/controller-runtime/pkg/client" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/integration" + "github.com/IBM-Blockchain/fabric-operator/integration/helper" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common" + + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var _ = Describe("trigger peer actions", func() { + AfterEach(func() { + // Set flag if a test fails + if CurrentGinkgoTestDescription().Failed { + testFailed = true + } + }) + + var ( + podName string + ibppeer *current.IBPPeer + ) + + BeforeEach(func() { + Eventually(func() int { return len(org1peer.GetRunningPods()) }).Should(Equal(1)) + podName = org1peer.GetRunningPods()[0].Name + + integration.ClearOperatorConfig(kclient, namespace) + }) + + When("spec has restart flag set to true", func() { + It("performs restart action", func() { + patch := func(o client.Object) { + ibppeer = o.(*current.IBPPeer) + ibppeer.Spec.Action.Restart = true + } + + err := integration.ResilientPatch(ibpCRClient, org1peer.Name, namespace, IBPPEERS, 3, &current.IBPPeer{}, patch) + Expect(err).NotTo(HaveOccurred()) + + Eventually(org1peer.PodIsRunning).Should((Equal(true))) + + By("restarting peer pods", func() { + Eventually(func() bool { + pods := org1peer.GetRunningPods() + if len(pods) == 0 { + return false + } + + newPodName := pods[0].Name + if newPodName != podName { + return true + } + + return false + }).Should(Equal(true)) + }) + + By("setting restart flag back to false after restart", func() { + Eventually(func() bool { + result := ibpCRClient.Get().Namespace(namespace).Resource(IBPPEERS).Name(org1peer.Name).Do(context.TODO()) + ibppeer := &current.IBPPeer{} + result.Into(ibppeer) + + return ibppeer.Spec.Action.Restart + }).Should(Equal(false)) + }) + }) + }) + + When("spec has ecert reenroll flag set to true", func() { + var ( + ecert, ekey []byte + + commonAssertions = func() { + By("restarting peer pods", func() { + Eventually(func() bool { + pods := org1peer.GetRunningPods() + if len(pods) == 0 { + return false + } + + newPodName := pods[0].Name + if newPodName != podName { + return true + } + + return false + }).Should(Equal(true)) + }) + + 
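+ // The assertions below read the "<name>-crypto-backup" secret via GetBackup; its most recent entry is expected to hold the base64-encoded signcert that was in use before reenrollment (comment added for clarity).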
By("backing up old signcert", func() { + backup := GetBackup("ecert", org1peer.Name) + Expect(len(backup.List)).NotTo(Equal(0)) + Expect(backup.List[len(backup.List)-1].SignCerts).To(Equal(base64.StdEncoding.EncodeToString(ecert))) + }) + + By("updating ecert signcert secret", func() { + updatedEcertSecret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), fmt.Sprintf("ecert-%s-signcert", org1peer.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + Expect(bytes.Equal(ecert, updatedEcertSecret.Data["cert.pem"])).To(Equal(false)) + }) + } + ) + + BeforeEach(func() { + ecertSecret, err := kclient.CoreV1().Secrets(namespace). + Get(context.TODO(), fmt.Sprintf("ecert-%s-signcert", org1peer.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + ecert = ecertSecret.Data["cert.pem"] + + ecertSecret, err = kclient.CoreV1().Secrets(namespace). + Get(context.TODO(), fmt.Sprintf("ecert-%s-keystore", org1peer.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + ekey = ecertSecret.Data["key.pem"] + }) + + It("gets a new certificate and key", func() { + patch := func(o client.Object) { + ibppeer = o.(*current.IBPPeer) + ibppeer.Spec.Action.Reenroll.EcertNewKey = true + } + + err := integration.ResilientPatch( + ibpCRClient, + org1peer.Name, + namespace, + IBPPEERS, + 3, + ¤t.IBPPeer{}, + patch) + Expect(err).NotTo(HaveOccurred()) + + commonAssertions() + + By("generating a new key", func() { + updatedEcertKey, err := kclient.CoreV1().Secrets(namespace). + Get(context.TODO(), fmt.Sprintf("ecert-%s-keystore", org1peer.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(bytes.Equal(ekey, updatedEcertKey.Data["key.pem"])).To(Equal(false)) + }) + + By("setting reenroll flag back to false after restart", func() { + ibppeer := ¤t.IBPPeer{} + Eventually(func() bool { + result := ibpCRClient.Get().Namespace(namespace).Resource(IBPPEERS).Name(org1peer.Name).Do(context.TODO()) + result.Into(ibppeer) + + return ibppeer.Spec.Action.Reenroll.EcertNewKey + }).Should(Equal(false)) + }) + }) + + It("gets a new certificate", func() { + patch := func(o client.Object) { + ibppeer = o.(*current.IBPPeer) + ibppeer.Spec.Action.Reenroll.Ecert = true + } + + err := integration.ResilientPatch( + ibpCRClient, + org1peer.Name, + namespace, + IBPPEERS, + 3, + ¤t.IBPPeer{}, + patch) + Expect(err).NotTo(HaveOccurred()) + + commonAssertions() + + By("not generating a new key", func() { + updatedEcertKey, err := kclient.CoreV1().Secrets(namespace). 
+ Get(context.TODO(), fmt.Sprintf("ecert-%s-keystore", org1peer.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(bytes.Equal(ekey, updatedEcertKey.Data["key.pem"])).To(Equal(true)) + }) + + By("setting reenroll flag back to false after restart", func() { + ibppeer := ¤t.IBPPeer{} + Eventually(func() bool { + result := ibpCRClient.Get().Namespace(namespace).Resource(IBPPEERS).Name(org1peer.Name).Do(context.TODO()) + result.Into(ibppeer) + + return ibppeer.Spec.Action.Reenroll.Ecert + }).Should(Equal(false)) + }) + }) + }) + + When("spec has TLS reenroll flag set to true", func() { + var ( + cert, key []byte + + commonAssertions = func() { + By("restarting peer pods", func() { + Eventually(func() bool { + pods := org1peer.GetRunningPods() + if len(pods) == 0 { + return false + } + + newPodName := pods[0].Name + if newPodName != podName { + return true + } + + return false + }).Should(Equal(true)) + }) + + By("backing up old signcert", func() { + backup := GetBackup("tls", org1peer.Name) + Expect(len(backup.List)).NotTo(Equal(0)) + Expect(backup.List[len(backup.List)-1].SignCerts).To(Equal(base64.StdEncoding.EncodeToString(cert))) + }) + + By("updating tls signcert secret", func() { + updatedTLSSecret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), fmt.Sprintf("tls-%s-signcert", org1peer.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + Expect(bytes.Equal(cert, updatedTLSSecret.Data["cert.pem"])).To(Equal(false)) + }) + } + ) + + BeforeEach(func() { + tlsSecret, err := kclient.CoreV1().Secrets(namespace). + Get(context.TODO(), fmt.Sprintf("tls-%s-signcert", org1peer.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + cert = tlsSecret.Data["cert.pem"] + + tlsSecret, err = kclient.CoreV1().Secrets(namespace). + Get(context.TODO(), fmt.Sprintf("tls-%s-keystore", org1peer.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + key = tlsSecret.Data["key.pem"] + }) + + When("requesting a new key", func() { + It("gets a new key and certificate", func() { + patch := func(o client.Object) { + ibppeer = o.(*current.IBPPeer) + ibppeer.Spec.Action.Reenroll.TLSCertNewKey = true + } + + err := integration.ResilientPatch( + ibpCRClient, + org1peer.Name, + namespace, + IBPPEERS, + 3, + ¤t.IBPPeer{}, + patch) + Expect(err).NotTo(HaveOccurred()) + + commonAssertions() + + By("generating a new key", func() { + updatedKey, err := kclient.CoreV1().Secrets(namespace). + Get(context.TODO(), fmt.Sprintf("tls-%s-keystore", org1peer.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + Expect(bytes.Equal(key, updatedKey.Data["key.pem"])).To(Equal(false)) + }) + + By("setting reenroll flag back to false after restart", func() { + Eventually(func() bool { + result := ibpCRClient.Get().Namespace(namespace).Resource(IBPPEERS).Name(org1peer.Name).Do(context.TODO()) + ibppeer := ¤t.IBPPeer{} + result.Into(ibppeer) + + return ibppeer.Spec.Action.Reenroll.TLSCertNewKey + }).Should(Equal(false)) + }) + }) + }) + + When("reusing existing key", func() { + It("gets a new certificate", func() { + patch := func(o client.Object) { + ibppeer = o.(*current.IBPPeer) + ibppeer.Spec.Action.Reenroll.TLSCert = true + } + + err := integration.ResilientPatch( + ibpCRClient, + org1peer.Name, + namespace, + IBPPEERS, + 3, + ¤t.IBPPeer{}, + patch) + Expect(err).NotTo(HaveOccurred()) + + commonAssertions() + + By("not generating a new key", func() { + updatedKey, err := kclient.CoreV1().Secrets(namespace). 
+ Get(context.TODO(), fmt.Sprintf("tls-%s-keystore", org1peer.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(bytes.Equal(key, updatedKey.Data["key.pem"])).To(Equal(true)) + }) + + By("setting reenroll flag back to false after restart", func() { + Eventually(func() bool { + result := ibpCRClient.Get().Namespace(namespace).Resource(IBPPEERS).Name(org1peer.Name).Do(context.TODO()) + ibppeer := ¤t.IBPPeer{} + result.Into(ibppeer) + + return ibppeer.Spec.Action.Reenroll.TLSCert + }).Should(Equal(false)) + }) + }) + }) + }) + + When("spec has ecert enroll flag set to true", func() { + var ( + ecert []byte + ecertKey []byte + ) + + BeforeEach(func() { + ecertSecret, err := kclient.CoreV1(). + Secrets(namespace). + Get(context.TODO(), fmt.Sprintf("ecert-%s-signcert", org1peer.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + ecertKeySecret, err := kclient.CoreV1(). + Secrets(namespace). + Get(context.TODO(), fmt.Sprintf("ecert-%s-keystore", org1peer.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + ecert = ecertSecret.Data["cert.pem"] + ecertKey = ecertKeySecret.Data["key.pem"] + }) + + It("generates new crypto", func() { + patch := func(o client.Object) { + ibppeer = o.(*current.IBPPeer) + ibppeer.Spec.Action.Enroll.Ecert = true + } + + err := integration.ResilientPatch(ibpCRClient, org1peer.Name, namespace, IBPPEERS, 3, ¤t.IBPPeer{}, patch) + Expect(err).NotTo(HaveOccurred()) + + By("backing up old crypto", func() { + Eventually(func() bool { + backup := GetBackup("ecert", org1peer.Name) + if len(backup.List) == 0 { + return false + } + + return backup.List[len(backup.List)-1].SignCerts == base64.StdEncoding.EncodeToString(ecert) && + backup.List[len(backup.List)-1].KeyStore == base64.StdEncoding.EncodeToString(ecertKey) + }).Should(Equal(true)) + }) + + By("updating ecert signcert secret", func() { + Eventually(func() bool { + updatedEcertSecret, err := kclient.CoreV1(). + Secrets(namespace). + Get(context.TODO(), fmt.Sprintf("ecert-%s-signcert", org1peer.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + return bytes.Equal(ecert, updatedEcertSecret.Data["cert.pem"]) + }).Should(Equal(false)) + }) + + By("updating ecert key secret", func() { + Eventually(func() bool { + updatedEcertSecret, err := kclient.CoreV1(). + Secrets(namespace). + Get(context.TODO(), fmt.Sprintf("ecert-%s-keystore", org1peer.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + return bytes.Equal(ecertKey, updatedEcertSecret.Data["key.pem"]) + }).Should(Equal(false)) + }) + + By("setting ecert action flag back to false in spec after completion", func() { + Eventually(func() bool { + result := ibpCRClient.Get().Namespace(namespace). + Resource(IBPPEERS). + Name(org1peer.Name). + Do(context.TODO()) + ibppeer := ¤t.IBPPeer{} + result.Into(ibppeer) + + return ibppeer.Spec.Action.Enroll.Ecert + }).Should(Equal(false)) + }) + }) + }) + + When("spec has tls enroll flag set to true", func() { + var ( + tlscert []byte + tlskey []byte + ) + + BeforeEach(func() { + tlscertSecret, err := kclient.CoreV1(). + Secrets(namespace). + Get(context.TODO(), fmt.Sprintf("tls-%s-signcert", org1peer.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + tlskeySecret, err := kclient.CoreV1(). + Secrets(namespace). 
+ Get(context.TODO(), fmt.Sprintf("tls-%s-keystore", org1peer.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + tlscert = tlscertSecret.Data["cert.pem"] + tlskey = tlskeySecret.Data["key.pem"] + }) + + It("generates new crypto", func() { + patch := func(o client.Object) { + ibppeer = o.(*current.IBPPeer) + ibppeer.Spec.Action.Enroll.TLSCert = true + } + + err := integration.ResilientPatch(ibpCRClient, org1peer.Name, namespace, IBPPEERS, 3, &current.IBPPeer{}, patch) + Expect(err).NotTo(HaveOccurred()) + + By("backing up old crypto", func() { + Eventually(func() bool { + backup := GetBackup("tls", org1peer.Name) + Expect(len(backup.List)).NotTo(Equal(0)) + return backup.List[len(backup.List)-1].SignCerts == base64.StdEncoding.EncodeToString(tlscert) && + backup.List[len(backup.List)-1].KeyStore == base64.StdEncoding.EncodeToString(tlskey) + }).Should(Equal(true)) + }) + + By("updating tls signcert secret", func() { + Eventually(func() bool { + updatedTlscertSecret, err := kclient.CoreV1(). + Secrets(namespace). + Get(context.TODO(), fmt.Sprintf("tls-%s-signcert", org1peer.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + return bytes.Equal(tlscert, updatedTlscertSecret.Data["cert.pem"]) + }).Should(Equal(false)) + }) + + By("updating tls key secret", func() { + Eventually(func() bool { + updatedTlskeySecret, err := kclient.CoreV1(). + Secrets(namespace). + Get(context.TODO(), fmt.Sprintf("tls-%s-keystore", org1peer.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + return bytes.Equal(tlskey, updatedTlskeySecret.Data["key.pem"]) + }).Should(Equal(false)) + }) + + By("setting TLS action flag back to false in spec after completion", func() { + Eventually(func() bool { + result := ibpCRClient.Get().Namespace(namespace). + Resource(IBPPEERS). + Name(org1peer.Name). + Do(context.TODO()) + ibppeer := &current.IBPPeer{} + result.Into(ibppeer) + + return ibppeer.Spec.Action.Enroll.TLSCert + }).Should(Equal(false)) + }) + }) + }) + + Context("upgrade dbs", func() { + var ( + migrationJobName string + err error + ) + + It("performs db reset job", func() { + patch := func(o client.Object) { + ibppeer = o.(*current.IBPPeer) + ibppeer.Spec.Action.UpgradeDBs = true + } + + err = integration.ResilientPatch(ibpCRClient, org1peer.Name, namespace, IBPPEERS, 3, &current.IBPPeer{}, patch) + Expect(err).NotTo(HaveOccurred()) + + By("starting migration job", func() { + Eventually(func() bool { + migrationJobName, err = helper.GetJobID(kclient, namespace, fmt.Sprintf("%s-dbmigration", ibppeer.Name)) + if err != nil { + return false + } + + _, err = kclient.BatchV1().Jobs(namespace). + Get(context.TODO(), migrationJobName, metav1.GetOptions{}) + if err != nil { + return false + } + return true + }).Should(Equal(true)) + }) + + By("clearing out reset value after completion", func() { + Eventually(func() bool { + result := ibpCRClient.Get().Namespace(namespace). + Resource(IBPPEERS). + Name(org1peer.Name). + Do(context.TODO()) + + Expect(result.Error()).NotTo(HaveOccurred()) + + ibppeer = &current.IBPPeer{} + result.Into(ibppeer) + + return ibppeer.Spec.Action.UpgradeDBs + }).Should(Equal(false)) + }) + + By("removing migration job", func() { + Eventually(func() bool { + _, err := kclient.BatchV1().Jobs(namespace). 
+ Get(context.TODO(), migrationJobName, metav1.GetOptions{}) + if err != nil { + return true + } + return false + }).Should(Equal(true)) + }) + + By("removing migration pod", func() { + Eventually(func() bool { + podList, err := kclient.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{ + LabelSelector: fmt.Sprintf("job-name=%s-dbmigration", ibppeer.Name), + }) + if err != nil { + return true + } + + if len(podList.Items) == 0 { + return true + } + + return false + }).Should(Equal(true)) + }) + }) + }) + +}) + +func GetBackup(certType, name string) *common.Backup { + backupSecret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), fmt.Sprintf("%s-crypto-backup", name), metav1.GetOptions{}) + if err != nil { + Expect(k8serrors.IsNotFound(err)).To(Equal(true)) + return &common.Backup{} + } + + backup := &common.Backup{} + key := fmt.Sprintf("%s-backup.json", certType) + err = json.Unmarshal(backupSecret.Data[key], backup) + Expect(err).NotTo(HaveOccurred()) + + return backup +} diff --git a/integration/actions/peer/reenroll_test.go b/integration/actions/peer/reenroll_test.go new file mode 100644 index 00000000..c5657d32 --- /dev/null +++ b/integration/actions/peer/reenroll_test.go @@ -0,0 +1,123 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package peer_test + +import ( + "bytes" + "context" + "fmt" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "sigs.k8s.io/controller-runtime/pkg/client" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/integration" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// This test is designed to stress-test reenroll functionality +// NOTE: need to set Restart.WaitTime = 0 in operator config +var _ = PDescribe("reenroll action", func() { + BeforeEach(func() { + Eventually(org1peer.PodIsRunning).Should((Equal(true))) + }) + + AfterEach(func() { + // Set flag if a test falls + if CurrentGinkgoTestDescription().Failed { + testFailed = true + } + }) + + Context("reenroll peer", func() { + const ( + // Modify to stress-test reenroll functionality + numReenrolls = 1 + ) + + When("spec has ecert &tlscert reenroll flag set to true", func() { + var ( + ecert []byte + tcert []byte + ) + + It("reenrolls ecert & tlscert for numReenrolls amount of times", func() { + count := 1 + for count <= numReenrolls { + fmt.Printf("REENROLL COUNT: %d\n", count) + + ecertSecret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), fmt.Sprintf("ecert-%s-signcert", org1peer.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + ecert = ecertSecret.Data["cert.pem"] + + tlsSecret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), fmt.Sprintf("tls-%s-signcert", org1peer.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + tcert = tlsSecret.Data["cert.pem"] + + patch := func(o client.Object) { + ibppeer := o.(*current.IBPPeer) + ibppeer.Spec.Action.Reenroll.Ecert = true + ibppeer.Spec.Action.Reenroll.TLSCert = true + } + + err = integration.ResilientPatch(ibpCRClient, org1peer.Name, namespace, IBPPEERS, 3, ¤t.IBPPeer{}, patch) + Expect(err).NotTo(HaveOccurred()) + + fmt.Printf("APPLIED PATCH NUMBER: %d\n", count) + + By("updating ecert signcert secret", func() { + Eventually(func() bool { + updatedEcertSecret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), fmt.Sprintf("ecert-%s-signcert", org1peer.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + return bytes.Equal(ecert, updatedEcertSecret.Data["cert.pem"]) + }).Should(Equal(false)) + }) + + By("updating tls signcert secret", func() { + Eventually(func() bool { + updatedTLSSecret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), fmt.Sprintf("tls-%s-signcert", org1peer.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + return bytes.Equal(tcert, updatedTLSSecret.Data["cert.pem"]) + }).Should(Equal(false)) + }) + + time.Sleep(10 * time.Second) + + By("setting reenroll flag back to false after restart", func() { + Eventually(func() bool { + result := ibpCRClient.Get().Namespace(namespace).Resource(IBPPEERS).Name(org1peer.Name).Do(context.TODO()) + ibppeer := ¤t.IBPPeer{} + result.Into(ibppeer) + + return ibppeer.Spec.Action.Reenroll.Ecert && + ibppeer.Spec.Action.Reenroll.TLSCert + }).Should(Equal(false)) + }) + + count++ + } + + }) + }) + }) +}) diff --git a/integration/autorenew/autorenew_suite_test.go b/integration/autorenew/autorenew_suite_test.go new file mode 100644 index 00000000..04c099ba --- /dev/null +++ b/integration/autorenew/autorenew_suite_test.go @@ -0,0 +1,327 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package autorenew_test + +import ( + "encoding/base64" + "fmt" + "io/ioutil" + "net/url" + "os" + "path/filepath" + "strings" + "testing" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/onsi/gomega/gexec" + + "github.com/IBM-Blockchain/fabric-operator/integration" + "github.com/IBM-Blockchain/fabric-operator/integration/helper" + ibpclient "github.com/IBM-Blockchain/fabric-operator/pkg/client" + baseorderer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + + "k8s.io/client-go/kubernetes" +) + +func TestAutorenew(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Autorenew Suite") +} + +const ( + ccTarFile = "gocc.tar.gz" + + FabricBinaryVersion = "2.2.3" + FabricCABinaryVersion = "1.5.1" + + peerAdminUsername = "peer-admin" + peerUsername = "peer" + ordererUsername = "orderer" + + IBPCAS = "ibpcas" + IBPPEERS = "ibppeers" + IBPORDERERS = "ibporderers" +) + +var ( + wd string // Working directory of test + namespace string + domain string + kclient *kubernetes.Clientset + ibpCRClient *ibpclient.IBPClient + colorIndex uint + testFailed bool + caHost string + tlsBytes []byte + + org1ca *helper.CA + org1peer *helper.Peer + orderer *helper.Orderer +) + +var _ = BeforeSuite(func() { + SetDefaultEventuallyTimeout(420 * time.Second) + SetDefaultEventuallyPollingInterval(time.Second) + + var err error + + domain = os.Getenv("DOMAIN") + if domain == "" { + domain = integration.TestAutomation1IngressDomain + } + + wd, err = os.Getwd() + Expect(err).NotTo(HaveOccurred()) + fmt.Fprintf(GinkgoWriter, "Working directory: %s\n", wd) + + cleanupFiles() + + cfg := &integration.Config{ + OperatorServiceAccount: "../../config/rbac/service_account.yaml", + OperatorRole: "../../config/rbac/role.yaml", + OperatorRoleBinding: "../../config/rbac/role_binding.yaml", + OperatorDeployment: "../../testdata/deploy/operator.yaml", + OrdererSecret: "../../testdata/deploy/orderer/secret.yaml", + PeerSecret: "../../testdata/deploy/peer/secret.yaml", + ConsoleTLSSecret: "../../testdata/deploy/console/tlssecret.yaml", + } + + namespace, kclient, ibpCRClient, err = integration.Setup(GinkgoWriter, cfg, "autorenew", "") + Expect(err).NotTo(HaveOccurred()) + + downloadBinaries() + + CreateNetwork() +}) + +var _ = AfterSuite(func() { + if strings.ToLower(os.Getenv("SAVE_TEST")) == "true" { + return + } + + integration.Cleanup(GinkgoWriter, kclient, namespace) + + cleanupFiles() +}) + +func CreateNetwork() { + By("starting CA pod", func() { + org1ca = Org1CA() + helper.CreateCA(ibpCRClient, org1ca.CR) + + Eventually(org1ca.PodIsRunning).Should((Equal(true))) + }) + + profile, err := org1ca.ConnectionProfile() + Expect(err).NotTo(HaveOccurred()) + + tlsBytes, err = util.Base64ToBytes(profile.TLS.Cert) + Expect(err).NotTo(HaveOccurred()) + + By("performing CA health check", func() { + Eventually(func() bool { + url := fmt.Sprintf("https://%s/cainfo", org1ca.Address()) + fmt.Fprintf(GinkgoWriter, "Waiting for CA health check to pass for '%s' at url: %s\n", org1ca.Name, url) + return 
org1ca.HealthCheck(url, tlsBytes) + }).Should(Equal(true)) + }) + + org1ca.TLSToFile(tlsBytes) + + caURL, err := url.Parse(profile.Endpoints.API) + Expect(err).NotTo(HaveOccurred()) + caHost = strings.Split(caURL.Host, ":")[0] + + By("enrolling ca admin", func() { + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, org1ca.Name, "org1ca-admin")) + sess, err := helper.StartSession(org1ca.Enroll("admin", "adminpw"), "Enroll CA Admin") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + }) + + By("registering peer identity", func() { + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, org1ca.Name, "org1ca-admin")) + sess, err := helper.StartSession(org1ca.Register(peerUsername, "peerpw", "peer"), "Register User") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, org1ca.Name, "org1ca-admin")) + sess, err = helper.StartSession(org1ca.Register("peer2", "peerpw2", "peer"), "Register User") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + }) + + By("registering and enrolling peer admin", func() { + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, org1ca.Name, "org1ca-admin")) + sess, err := helper.StartSession(org1ca.Register(peerAdminUsername, "peer-adminpw", "admin"), "Register Peer Admin") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, "org1peer", peerAdminUsername)) + sess, err = helper.StartSession(org1ca.Enroll(peerAdminUsername, "peer-adminpw"), "Enroll Peer Admin") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, "org1peer", peerAdminUsername+"2")) + sess, err = helper.StartSession(org1ca.Enroll(peerAdminUsername, "peer-adminpw"), "Enroll Second Peer Admin") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + }) + + By("registering orderer identity", func() { + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, org1ca.Name, "org1ca-admin")) + sess, err := helper.StartSession(org1ca.Register(ordererUsername, "ordererpw", "orderer"), "Register Orderer Identity") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, org1ca.Name, "org1ca-admin")) + sess, err = helper.StartSession(org1ca.Register("orderer2", "ordererpw2", "orderer"), "Register Orderer Identity") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + }) + + adminCertBytes, err := ioutil.ReadFile( + filepath.Join( + wd, + "org1peer", + peerAdminUsername, + "msp", + "signcerts", + "cert.pem", + ), + ) + Expect(err).NotTo(HaveOccurred()) + adminCertB64 := base64.StdEncoding.EncodeToString(adminCertBytes) + + By("starting Peer pod", func() { + org1peer = Org1Peer(profile.TLS.Cert, caHost, adminCertB64) + err = helper.CreatePeer(ibpCRClient, org1peer.CR) + Expect(err).NotTo(HaveOccurred()) + }) + + By("starting Orderer pod", func() { + orderer = GetOrderer(profile.TLS.Cert, caHost) + err = helper.CreateOrderer(ibpCRClient, orderer.CR) + Expect(err).NotTo(HaveOccurred()) + }) + + Eventually(org1peer.PodCreated).Should((Equal(true))) + Eventually(orderer.Nodes[0].PodCreated).Should((Equal(true))) +} + +func downloadBinaries() { + os.Setenv("FABRIC_VERSION", FabricBinaryVersion) + os.Setenv("FABRIC_CA_VERSION", FabricCABinaryVersion) + sess, err := helper.StartSession( + 
helper.GetCommand(helper.AbsPath(wd, "../../scripts/download_binaries.sh")), + "Download Binaries", + ) + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) +} + +func cleanupFiles() { + os.RemoveAll(filepath.Join(wd, Org1CA().Name)) + os.RemoveAll(filepath.Join(wd, Org1Peer("", "", "").Name)) + os.RemoveAll(filepath.Join(wd, GetOrderer("", "").Nodes[0].Name)) + os.RemoveAll(filepath.Join(wd, ccTarFile)) +} + +func Org1CA() *helper.CA { + cr := helper.Org1CACR(namespace, domain) + + return &helper.CA{ + Domain: domain, + Name: cr.Name, + Namespace: namespace, + WorkingDir: wd, + CR: cr, + CRClient: ibpCRClient, + KClient: kclient, + NativeResourcePoller: integration.NativeResourcePoller{ + Name: cr.Name, + Namespace: namespace, + Client: kclient, + }, + } +} + +func Org1Peer(tlsCert, caHost, adminCert string) *helper.Peer { + cr, err := helper.Org1PeerCR(namespace, domain, peerUsername, tlsCert, caHost, adminCert) + Expect(err).NotTo(HaveOccurred()) + + // 1 year - 30s + cr.Spec.NumSecondsWarningPeriod = 31535970 + + return &helper.Peer{ + Domain: domain, + Name: cr.Name, + Namespace: namespace, + WorkingDir: wd, + CR: cr, + CRClient: ibpCRClient, + KClient: kclient, + NativeResourcePoller: integration.NativeResourcePoller{ + Name: cr.Name, + Namespace: namespace, + Client: kclient, + }, + } +} + +func GetOrderer(tlsCert, caHost string) *helper.Orderer { + cr, err := helper.OrdererCR(namespace, domain, ordererUsername, tlsCert, caHost) + Expect(err).NotTo(HaveOccurred()) + + // 1 year - 30s + cr.Spec.NumSecondsWarningPeriod = 31535970 + + nodes := []helper.Orderer{ + helper.Orderer{ + Name: cr.Name + "node1", + Namespace: namespace, + CR: cr.DeepCopy(), + NodeName: fmt.Sprintf("%s%s%d", cr.Name, baseorderer.NODE, 1), + NativeResourcePoller: integration.NativeResourcePoller{ + Name: cr.Name + "node1", + Namespace: namespace, + Client: kclient, + }, + }, + } + + nodes[0].CR.ObjectMeta.Name = cr.Name + "node1" + + return &helper.Orderer{ + Name: cr.Name, + Namespace: namespace, + CR: cr, + NodeName: fmt.Sprintf("%s-%s%d", cr.Name, baseorderer.NODE, 1), + NativeResourcePoller: integration.NativeResourcePoller{ + Name: cr.Name, + Namespace: namespace, + Client: kclient, + }, + Nodes: nodes, + CRClient: ibpCRClient, + } +} diff --git a/integration/autorenew/autorenew_test.go b/integration/autorenew/autorenew_test.go new file mode 100644 index 00000000..f69a6498 --- /dev/null +++ b/integration/autorenew/autorenew_test.go @@ -0,0 +1,213 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package autorenew_test + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "time" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/integration/helper" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var _ = Describe("Autorenew", func() { + AfterEach(func() { + // Set flag if a test falls + if CurrentGinkgoTestDescription().Failed { + testFailed = true + } + }) + + Context("orderer", func() { + var ( + node1 helper.Orderer + + tlscert []byte + ecert []byte + ) + + BeforeEach(func() { + node1 = orderer.Nodes[0] + Eventually(node1.PodCreated, time.Second*60, time.Second*2).Should((Equal(true))) + }) + + AfterEach(func() { + // Set flag if a test falls + if CurrentGinkgoTestDescription().Failed { + testFailed = true + } + }) + + BeforeEach(func() { + ecertSecret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), fmt.Sprintf("ecert-%s-signcert", node1.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + ecert = ecertSecret.Data["cert.pem"] + + tlsSecret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), fmt.Sprintf("tls-%s-signcert", node1.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + tlscert = tlsSecret.Data["cert.pem"] + }) + + When("signcert certificate is up for renewal and enrollment spec exists", func() { + It("only renews the ecert when timer goes off", func() { + // signcert certificates expire in 1 year (31536000s) from creation; + // NumSecondsWarningPeriod has been set to 1 year - 60s to make + // renewal occur when test runs + + By("setting status to warning", func() { + Eventually(orderer.PollForParentCRStatus).Should(Equal(current.Warning)) + }) + + By("backing up old ecert signcert", func() { + Eventually(func() bool { + backup := GetBackup("ecert", node1.Name) + if len(backup.List) > 0 { + return backup.List[len(backup.List)-1].SignCerts == base64.StdEncoding.EncodeToString(ecert) + } + + return false + }).Should(Equal(true)) + + }) + + By("reenrolling identity and updating ecert certificate secret", func() { + Eventually(func() bool { + updatedEcertSecret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), fmt.Sprintf("ecert-%s-signcert", node1.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + return bytes.Equal(ecert, updatedEcertSecret.Data["cert.pem"]) + }).Should(Equal(false)) + }) + + By("not updating tls signcert secret", func() { + Eventually(func() bool { + updatedTLSSecret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), fmt.Sprintf("tls-%s-signcert", node1.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + return bytes.Equal(tlscert, updatedTLSSecret.Data["cert.pem"]) + }).Should(Equal(true)) + }) + + By("returning to Deployed status as tls cert won't expire for 10 years", func() { + Eventually(orderer.PollForParentCRStatus).Should(Equal(current.Deployed)) + }) + + }) + }) + }) + + Context("peer", func() { + var ( + tlscert []byte + ecert []byte + ) + + BeforeEach(func() { + Eventually(org1peer.PodCreated, time.Second*60, time.Second*2).Should((Equal(true))) + }) + + AfterEach(func() { + // Set flag if a test falls + if CurrentGinkgoTestDescription().Failed { + testFailed = true + } + }) + + BeforeEach(func() { + ecertSecret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), fmt.Sprintf("ecert-%s-signcert", org1peer.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + ecert = ecertSecret.Data["cert.pem"] + + tlsSecret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), fmt.Sprintf("tls-%s-signcert", org1peer.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + 
tlscert = tlsSecret.Data["cert.pem"] + }) + + When("signcert certificate is up for renewal and enrollment spec exists", func() { + It("only renews the ecert when timer goes off", func() { + // signcert certificates expire in 1 year (31536000s) from creation; + // NumSecondsWarningPeriod has been set to 1 year - 60s to make + // renewal occur when test runs + + By("setting status to warning", func() { + Eventually(org1peer.PollForCRStatus).Should(Equal(current.Warning)) + }) + + By("backing up old ecert signcert", func() { + Eventually(func() bool { + backup := GetBackup("ecert", org1peer.Name) + if len(backup.List) > 0 { + return backup.List[len(backup.List)-1].SignCerts == base64.StdEncoding.EncodeToString(ecert) + } + + return false + }).Should(Equal(true)) + }) + + By("reenrolling identity and updating ecert certificate secret", func() { + Eventually(func() bool { + updatedEcertSecret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), fmt.Sprintf("ecert-%s-signcert", org1peer.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + return bytes.Equal(ecert, updatedEcertSecret.Data["cert.pem"]) + }).Should(Equal(false)) + }) + + By("not updating tls signcert secret", func() { + Eventually(func() bool { + updatedTLSSecret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), fmt.Sprintf("tls-%s-signcert", org1peer.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + return bytes.Equal(tlscert, updatedTLSSecret.Data["cert.pem"]) + }).Should(Equal(true)) + }) + + By("returning to Deployed status as tls cert won't expire for 10 years", func() { + Eventually(org1peer.PollForCRStatus).Should(Equal(current.Deployed)) + }) + + }) + }) + }) +}) + +func GetBackup(certType, name string) *common.Backup { + backupSecret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), fmt.Sprintf("%s-crypto-backup", name), metav1.GetOptions{}) + if err != nil { + Expect(k8serrors.IsNotFound(err)).To(Equal(true)) + return &common.Backup{} + } + + backup := &common.Backup{} + key := fmt.Sprintf("%s-backup.json", certType) + err = json.Unmarshal(backupSecret.Data[key], backup) + Expect(err).NotTo(HaveOccurred()) + + return backup +} diff --git a/integration/ca/ca_suite_test.go b/integration/ca/ca_suite_test.go new file mode 100644 index 00000000..275a9a29 --- /dev/null +++ b/integration/ca/ca_suite_test.go @@ -0,0 +1,146 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package ca_test + +import ( + "context" + "os" + "strings" + "testing" + "time" + + "github.com/IBM-Blockchain/fabric-operator/integration" + "github.com/IBM-Blockchain/fabric-operator/integration/helper" + ibpclient "github.com/IBM-Blockchain/fabric-operator/pkg/client" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" +) + +func TestCa(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Ca Suite") +} + +const ( + // This TLS certificate is encoded for the DNS domain aliases 127.0.0.1, localhost, and *.vcap.me and is good for 5 years: + // + // notAfter: "2027-05-24T03:14:42Z" + // notBefore: "2022-05-25T03:14:42Z" + // renewalTime: "2025-09-22T19:14:42Z" + // + // This certificate was generated with cert-manager.io using a self-signed issuer for the root CA. + // If tests start to fail for TLS handshake errors, the certificate will need to be renewed or reissued. + tlsCert = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJqakNDQVRTZ0F3SUJBZ0lRVXRIS2NUTWNZS21KblVtbEJNZW94REFLQmdncWhrak9QUVFEQWpBbE1TTXcKSVFZRFZRUURFeHBtWVdKeWFXTXRZMkV0YVc1MFpXZHlZWFJwYjI0dGRHVnpkREFlRncweU1qQTFNalV3TXpFMApORGRhRncweU56QTFNalF3TXpFME5EZGFNQUF3V1RBVEJnY3Foa2pPUFFJQkJnZ3Foa2pPUFFNQkJ3TkNBQVRwCjN2d3RMZFlyUzFTNVFSUmFqRjJReHFIYWllMUo2dzlHM2RwQklLYWwwTTlYaUttR0Q4eFBvRkpkcENNZTZWdDIKeml1UjZrU2FNL3lXQmU4TGd5eExvMnN3YVRBT0JnTlZIUThCQWY4RUJBTUNCYUF3REFZRFZSMFRBUUgvQkFJdwpBREFmQmdOVkhTTUVHREFXZ0JRdkVBWWdjZEwwa0ljWEtDaGVmVzg3NW8vYnd6QW9CZ05WSFJFQkFmOEVIakFjCmdnbHNiMk5oYkdodmMzU0NDU291ZG1OaGNDNXRaWWNFZndBQUFUQUtCZ2dxaGtqT1BRUURBZ05JQURCRkFpQXUKMEpLY29lQmhYajJnbmQ1cjE5THUxeEVwdG1kelFoazh5OXFTRkZ2dkF3SWhBSWp5Z1VLY2tzQkk4a1dBeVNlbQp0VzJ4cVE3RVZkTmR6WDZYbWwrNVBQengKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=" + tlsKey = "LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUhoWWFRbDViYXZVR3FJd2prK3YrODNmYzNIamZuRVdueEFQbjJ5OFRTUWRvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFNmQ3OExTM1dLMHRVdVVFVVdveGRrTWFoMm9udFNlc1BSdDNhUVNDbXBkRFBWNGlwaGcvTQpUNkJTWGFRakh1bGJkczRya2VwRW1qUDhsZ1h2QzRNc1N3PT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo=" + trustedRootTLSCert = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJpekNDQVRDZ0F3SUJBZ0lRZXZWM2VUZmh3WlNHYVI4aXhTR1hRakFLQmdncWhrak9QUVFEQWpBbE1TTXcKSVFZRFZRUURFeHBtWVdKeWFXTXRZMkV0YVc1MFpXZHlZWFJwYjI0dGRHVnpkREFlRncweU1qQTFNalV3TXpFMApOREphRncweU56QTFNalF3TXpFME5ESmFNQ1V4SXpBaEJnTlZCQU1UR21aaFluSnBZeTFqWVMxcGJuUmxaM0poCmRHbHZiaTEwWlhOME1Ga3dFd1lIS29aSXpqMENBUVlJS29aSXpqMERBUWNEUWdBRXlzc2d3dFo2dlI3a2svbUsKYUFUZE45TEhmTWsrYXMxcm8rM24za1N2QTFuVEFCa1V6bVdGNlhCS1I5eUh6V3dwZTlHL0o3L3MrenZsME5GOApRZGdzenFOQ01FQXdEZ1lEVlIwUEFRSC9CQVFEQWdLa01BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0hRWURWUjBPCkJCWUVGQzhRQmlCeDB2U1FoeGNvS0Y1OWJ6dm1qOXZETUFvR0NDcUdTTTQ5QkFNQ0Ewa0FNRVlDSVFEaXo1SnoKeGhKcjQ4SlpRRkpzd1dteTRCU21FWXp0NXFmUmsyMFhyRzI4M3dJaEFLaDBXMmkxcFpiY0lPODBXSmhlVkxzSQpDM0JGMk5McTBsVlhXanNGQVVndQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==" +) + +var ( + namespace string + domain string + kclient *kubernetes.Clientset + ibpCRClient *ibpclient.IBPClient + namespaceSuffix = "ca" + testFailed bool +) + +var ( + defaultRequests = corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("50m"), + corev1.ResourceMemory: resource.MustParse("100M"), + corev1.ResourceEphemeralStorage: resource.MustParse("100M"), + } + + defaultLimits = corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("50m"), + corev1.ResourceMemory: resource.MustParse("100M"), + corev1.ResourceEphemeralStorage: resource.MustParse("1G"), + } +) + +var _ = BeforeSuite(func() { + SetDefaultEventuallyTimeout(240 * time.Second) + SetDefaultEventuallyPollingInterval(time.Second) + + var err error + domain = os.Getenv("DOMAIN") + if domain == "" { + domain = integration.TestAutomation1IngressDomain + } + + cfg := 
&integration.Config{ + OperatorServiceAccount: "../../config/rbac/service_account.yaml", + OperatorRole: "../../config/rbac/role.yaml", + OperatorRoleBinding: "../../config/rbac/role_binding.yaml", + OperatorDeployment: "../../testdata/deploy/operator.yaml", + OrdererSecret: "../../testdata/deploy/orderer/secret.yaml", + PeerSecret: "../../testdata/deploy/peer/secret.yaml", + ConsoleTLSSecret: "../../testdata/deploy/console/tlssecret.yaml", + } + + namespace, kclient, ibpCRClient, err = integration.Setup(GinkgoWriter, cfg, namespaceSuffix, "") + Expect(err).NotTo(HaveOccurred()) +}) + +var _ = AfterSuite(func() { + + if strings.ToLower(os.Getenv("SAVE_TEST")) == "true" { + return + } + + err := integration.Cleanup(GinkgoWriter, kclient, namespace) + Expect(err).NotTo(HaveOccurred()) +}) + +type CA struct { + helper.CA + + expectedRequests corev1.ResourceList + expectedLimits corev1.ResourceList +} + +func (ca *CA) resourcesRequestsUpdated() bool { + dep, err := kclient.AppsV1().Deployments(namespace).Get(context.TODO(), ca.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + updatedRequests := dep.Spec.Template.Spec.Containers[0].Resources.Requests + if updatedRequests[corev1.ResourceCPU] == ca.expectedRequests[corev1.ResourceCPU] { + if updatedRequests[corev1.ResourceMemory] == ca.expectedRequests[corev1.ResourceMemory] { + return true + } + } + return false +} + +func (ca *CA) resourcesLimitsUpdated() bool { + dep, err := kclient.AppsV1().Deployments(namespace).Get(context.TODO(), ca.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + updatedLimits := dep.Spec.Template.Spec.Containers[0].Resources.Limits + if updatedLimits[corev1.ResourceCPU] == ca.expectedLimits[corev1.ResourceCPU] { + if updatedLimits[corev1.ResourceMemory] == ca.expectedLimits[corev1.ResourceMemory] { + return true + } + } + return false +} diff --git a/integration/ca/ca_test.go b/integration/ca/ca_test.go new file mode 100644 index 00000000..2627f8af --- /dev/null +++ b/integration/ca/ca_test.go @@ -0,0 +1,788 @@ +//go:build !pkcs11 +// +build !pkcs11 + +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package ca_test + +import ( + "context" + "encoding/json" + "fmt" + "time" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/integration" + "github.com/IBM-Blockchain/fabric-operator/integration/helper" + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/ca/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + "github.com/IBM-Blockchain/fabric-operator/pkg/util/pointer" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "sigs.k8s.io/yaml" +) + +var _ = Describe("Interaction between IBP-Operator and Kubernetes cluster", func() { + var ( + err error + ca *CA + ca2 *CA + ca3 *CA + ) + + BeforeEach(func() { + ca = GetCA1() + err = helper.CreateCA(ibpCRClient, ca.CR) + Expect(err).NotTo(HaveOccurred()) + + ca2 = GetCA2() + err = helper.CreateCA(ibpCRClient, ca2.CR) + Expect(err).NotTo(HaveOccurred()) + + ca3 = GetCA3() + err = helper.CreateCA(ibpCRClient, ca3.CR) + Expect(err).NotTo(HaveOccurred()) + + integration.ClearOperatorConfig(kclient, namespace) + }) + + AfterEach(func() { + // Set flag if a test falls + if CurrentGinkgoTestDescription().Failed { + testFailed = true + } + }) + + Context("IBPCA controller", func() { + Context("applying the first instance of IBPCA CR", func() { + var ( + err error + dep *appsv1.Deployment + ) + + It("creates a IBPCA custom resource", func() { + By("setting the CR status to deploying", func() { + Eventually(ca.PollForCRStatus).Should((Equal(current.Deploying))) + }) + + By("creating a service", func() { + Eventually(ca.ServiceExists).Should((Equal(true))) + }) + + By("creating a configmap", func() { + Eventually(ca.ConfigMapExists).Should((Equal(true))) + }) + + By("starting a ingress", func() { + Eventually(ca.IngressExists).Should((Equal(true))) + }) + + By("creating a deployment", func() { + Eventually(ca.DeploymentExists).Should((Equal(true))) + }) + + By("starting a pod", func() { + Eventually(ca.PodIsRunning).Should((Equal(true))) + }) + + By("creating config map that contains spec", func() { + Eventually(func() bool { + _, err := kclient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), ca.Name+"-spec", metav1.GetOptions{}) + if err != nil { + return false + } + return true + }).Should(Equal(true)) + }) + + By("creating secret with crypto for CA", func() { + secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), ca.Name+"-ca-crypto", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(secret).NotTo(BeNil()) + Expect(len(secret.Data)).To(Equal(6)) + }) + + By("creating secret with crypto for TLS CA", func() { + secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), ca.Name+"-tlsca-crypto", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(secret).NotTo(BeNil()) + Expect(len(secret.Data)).To(Equal(2)) + }) + + By("setting the CR status to deployed when pod is running", func() { + Eventually(ca.PollForCRStatus).Should((Equal(current.Deployed))) + }) + }) + + It("should not find zone and region", func() { + // Wait for new deployment before querying deployment for updates + err = wait.Poll(500*time.Millisecond, 60*time.Second, func() (bool, error) { + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), ca.Name, metav1.GetOptions{}) + if dep != nil { + if dep.Status.UpdatedReplicas == 1 && dep.Status.Conditions[0].Type == appsv1.DeploymentAvailable { + return true, nil + } + } + return false, nil + }) + Expect(err).NotTo(HaveOccurred()) + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), ca.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + // TODO :: only run these when using MZ clusters + // By("checking zone", func() { + // 
Expect(ca.TestAffinityZone(dep)).To((Equal(false))) + // }) + + // By("checking region", func() { + // Expect(ca.TestAffinityRegion(dep)).To((Equal(false))) + // }) + }) + + When("the custom resource is updated", func() { + var ( + err error + dep *appsv1.Deployment + newResourceRequests corev1.ResourceList + newResourceLimits corev1.ResourceList + ) + + BeforeEach(func() { + newResourceRequests = map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("55m"), + corev1.ResourceMemory: resource.MustParse("110M"), + } + newResourceLimits = map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("55m"), + corev1.ResourceMemory: resource.MustParse("110M"), + } + ca.expectedRequests = newResourceRequests + ca.expectedLimits = newResourceLimits + + Eventually(ca.DeploymentExists).Should((Equal(true))) + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), ca.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + }) + + It("updates the instance of IBPCA if resources are updated in CR", func() { + currentResources := dep.Spec.Template.Spec.Containers[0].Resources + Expect(currentResources.Requests).To(Equal(defaultRequests)) + Expect(currentResources.Limits).To(Equal(defaultLimits)) + + ca.CR.Spec.Resources = &current.CAResources{ + CA: &corev1.ResourceRequirements{ + Requests: newResourceRequests, + Limits: newResourceLimits, + }, + } + + caOverrides := &v1.ServerConfig{} + err := json.Unmarshal(ca.CR.Spec.ConfigOverride.CA.Raw, caOverrides) + Expect(err).NotTo(HaveOccurred()) + caOverrides.CAConfig.CA = v1.CAInfo{ + Name: "new-ca", + } + + caJson, err := util.ConvertToJsonMessage(caOverrides) + Expect(err).NotTo(HaveOccurred()) + ca.CR.Spec.ConfigOverride.CA = &runtime.RawExtension{Raw: *caJson} + + tlscaOverrides := &v1.ServerConfig{} + err = json.Unmarshal(ca.CR.Spec.ConfigOverride.TLSCA.Raw, tlscaOverrides) + Expect(err).NotTo(HaveOccurred()) + tlscaOverrides.CAConfig.CA = v1.CAInfo{ + Name: "new-tlsca", + } + + tlscaJson, err := util.ConvertToJsonMessage(tlscaOverrides) + Expect(err).NotTo(HaveOccurred()) + ca.CR.Spec.ConfigOverride.TLSCA = &runtime.RawExtension{Raw: *tlscaJson} + + bytes, err := json.Marshal(ca.CR) + Expect(err).NotTo(HaveOccurred()) + + result := ibpCRClient.Patch(types.MergePatchType).Namespace(namespace).Resource("ibpcas").Name(ca.Name).Body(bytes).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + + // Wait for new deployment before querying deployment for updates + err = wait.Poll(500*time.Millisecond, 120*time.Second, func() (bool, error) { + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), ca.Name, metav1.GetOptions{}) + if dep != nil { + if dep.Status.UpdatedReplicas == 1 && dep.Status.Conditions[0].Type == appsv1.DeploymentAvailable { + if dep.Spec.Template.Spec.Containers[0].Resources.Requests.Cpu().MilliValue() == newResourceRequests.Cpu().MilliValue() { + return true, nil + } + } + } + return false, nil + }) + Expect(err).NotTo(HaveOccurred()) + + Eventually(ca.resourcesRequestsUpdated).Should(Equal(true)) + Eventually(ca.resourcesLimitsUpdated).Should(Equal(true)) + + By("updating the config map with new values from override for ecert and tls ca", func() { + cm, err := kclient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), fmt.Sprintf("%s-ca-config", ca.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + serverconfig := &v1.ServerConfig{} + err = yaml.Unmarshal(cm.BinaryData["fabric-ca-server-config.yaml"], serverconfig) +
Expect(err).NotTo(HaveOccurred()) + + Expect(serverconfig.CAConfig.CA.Name).To(Equal("new-ca")) + + cm, err = kclient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), fmt.Sprintf("%s-tlsca-config", ca.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + serverconfig = &v1.ServerConfig{} + err = yaml.Unmarshal(cm.BinaryData["fabric-ca-server-config.yaml"], serverconfig) + Expect(err).NotTo(HaveOccurred()) + + Expect(serverconfig.CAConfig.CA.Name).To(Equal("new-tlsca")) + + By("restarting deployment for ecert ca", func() { + // Pod should first go away, and deployment is restarted + // Eventually(ca.PodIsRunning).Should((Equal(false))) // FLAKY TEST + // Pod should eventually then go into running state + Eventually(ca.PodIsRunning).Should((Equal(true))) + }) + + }) + + }) + }) + + When("a deployment managed by operator is manually edited", func() { + var ( + err error + dep *appsv1.Deployment + ) + + BeforeEach(func() { + Eventually(ca.DeploymentExists).Should((Equal(true))) + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), ca.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + }) + + It("restores states", func() { + + // Reduce the deployment resource requests + origRequests := dep.Spec.Template.Spec.Containers[0].Resources.Requests + newResourceRequests := corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("20m"), + corev1.ResourceMemory: resource.MustParse("50M"), + } + Expect(newResourceRequests).ToNot(Equal(origRequests)) + + dep.Spec.Template.Spec.Containers[0].Resources.Requests = newResourceRequests + depBytes, err := json.Marshal(dep) + Expect(err).NotTo(HaveOccurred()) + + // After patching, the resource limits should have been reduced to the lower values + dep, err = kclient.AppsV1().Deployments(namespace).Patch(context.TODO(), ca.Name, types.MergePatchType, depBytes, metav1.PatchOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(dep.Spec.Template.Spec.Containers[0].Resources.Requests).To(Equal(newResourceRequests)) + + // And with get resource, not just the deployment returned by patch + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), ca.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(dep.Spec.Template.Spec.Containers[0].Resources.Requests).To(Equal(newResourceRequests)) + + // But the operator prevails: resource limits will be reset to the original amount specified in the CRD + err = wait.Poll(500*time.Millisecond, 60*time.Second, func() (bool, error) { + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), ca.Name, metav1.GetOptions{}) + if dep != nil { + if dep.Spec.Template.Spec.Containers[0].Resources.Requests.Cpu().MilliValue() == origRequests.Cpu().MilliValue() { + return true, nil + } + } + return false, nil + }) + Expect(err).NotTo(HaveOccurred()) + + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), ca.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(dep.Spec.Template.Spec.Containers[0].Resources.Requests).To(Equal(origRequests)) + }) + }) + }) + + Context("applying the second instance of IBPCA CR", func() { + var ( + err error + dep *appsv1.Deployment + ) + + BeforeEach(func() { + Eventually(ca2.PodIsRunning).Should((Equal(true))) + }) + + It("should find zone and region", func() { + // Wait for new deployment before querying deployment for updates + err = wait.Poll(500*time.Millisecond, 60*time.Second, func() (bool, error) { + dep, err = 
kclient.AppsV1().Deployments(namespace).Get(context.TODO(), ca2.Name, metav1.GetOptions{}) + if dep != nil { + if dep.Status.UpdatedReplicas == 1 && dep.Status.Conditions[0].Type == appsv1.DeploymentAvailable { + return true, nil + } + } + return false, nil + }) + Expect(err).NotTo(HaveOccurred()) + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), ca2.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + // TODO :: only run these when using MZ clusters + // By("checking zone", func() { + // Expect(ca2.TestAffinityZone(dep)).To((Equal(true))) + // }) + + // By("checking region", func() { + // Expect(ca2.TestAffinityRegion(dep)).To((Equal(true))) + // }) + }) + + When("fabric version is updated", func() { + BeforeEach(func() { + ibpca := &current.IBPCA{} + result := ibpCRClient.Get().Namespace(namespace).Resource("ibpcas").Name(ca2.Name).Do(context.TODO()) + result.Into(ibpca) + + ibpca.Spec.FabricVersion = integration.FabricCAVersion + "-1" + bytes, err := json.Marshal(ibpca) + Expect(err).NotTo(HaveOccurred()) + + result = ibpCRClient.Patch(types.MergePatchType).Namespace(namespace).Resource("ibpcas").Name(ca2.Name).Body(bytes).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + }) + + It("sets images mapped to version", func() { + Eventually(func() current.CAImages { + ibpca := &current.IBPCA{} + result := ibpCRClient.Get().Namespace(namespace).Resource("ibpcas").Name(ca2.Name).Do(context.TODO()) + result.Into(ibpca) + fmt.Println("ca images ") + fmt.Printf("%+v", *ibpca.Spec.Images) + return *ibpca.Spec.Images + }).Should(Equal(current.CAImages{ + CAInitImage: integration.InitImage, + CAInitTag: integration.InitTag, + CAImage: integration.CaImage, + CATag: integration.CaTag, + })) + }) + }) + }) + + Context("applying incorrectly configured third instance of IBPCA CR", func() { + It("should set the CR status to error", func() { + Eventually(ca3.PollForCRStatus).Should((Equal(current.Error))) + + crStatus := &current.IBPCA{} + result := ibpCRClient.Get().Namespace(namespace).Resource("ibpcas").Name(ca3.Name).Do(context.TODO()) + result.Into(crStatus) + + Expect(crStatus.Status.Message).To(ContainSubstring("Failed to provide database configuration for TLSCA to support greater than 1 replicas")) + }) + }) + + Context("pod restart", func() { + var ( + oldPodName string + ) + Context("should not trigger deployment restart if config overrides not updated", func() { + BeforeEach(func() { + Eventually(ca.PodIsRunning).Should((Equal(true))) + + Eventually(func() int { + return len(ca.GetPods()) + }).Should(Equal(1)) + + pods := ca.GetPods() + oldPodName = pods[0].Name + }) + + It("does not restart the ca pod", func() { + Eventually(ca.PodIsRunning).Should((Equal(true))) + + Eventually(func() bool { + pods := ca.GetPods() + if len(pods) != 1 { + return false + } + + newPodName := pods[0].Name + if newPodName == oldPodName { + return true + } + + return false + }).Should(Equal(true)) + }) + }) + + Context("should trigger deployment restart if config overrides updated", func() { + BeforeEach(func() { + Eventually(ca.PodIsRunning).Should((Equal(true))) + + Eventually(func() int { + return len(ca.GetPods()) + }).Should(Equal(1)) + + pods := ca.GetPods() + oldPodName = pods[0].Name + + caOverrides := &v1.ServerConfig{} + err = json.Unmarshal(ca.CR.Spec.ConfigOverride.CA.Raw, caOverrides) + Expect(err).NotTo(HaveOccurred()) + caOverrides.CAConfig.CA = v1.CAInfo{ + Name: "new-ca", + } + + caJson, err := util.ConvertToJsonMessage(caOverrides) +
Expect(err).NotTo(HaveOccurred()) + ca.CR.Spec.ConfigOverride.CA = &runtime.RawExtension{Raw: *caJson} + + bytes, err := json.Marshal(ca.CR) + Expect(err).NotTo(HaveOccurred()) + + result := ibpCRClient.Patch(types.MergePatchType).Namespace(namespace).Resource("ibpcas").Name(ca.Name).Body(bytes).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + }) + + It("restarts the ca pod", func() { + + // FLAKY TEST: Checking for pod not running before pod is running causes test to be flaky + // due to the rolling restart nature of our component restarts. Sometimes, a new pod + // comes up quicker than this test can check for a non-running pod, so it will never + // detect that the pod was being terminated before a new one come up. + // Eventually(ca.PodIsRunning, 240*time.Second, 500*time.Millisecond).Should((Equal(false))) + Eventually(ca.PodIsRunning).Should((Equal(true))) + + Eventually(func() bool { + pods := ca.GetPods() + if len(pods) != 1 { + return false + } + + newPodName := pods[0].Name + if newPodName == oldPodName { + return false + } + + return true + }).Should(Equal(true)) + }) + }) + }) + + Context("enroll intermediate ca", func() { + BeforeEach(func() { + Eventually(ca.PodIsRunning).Should((Equal(true))) + }) + + It("enrolls with root ca", func() { + ica := GetIntermediateCA() + helper.CreateCA(ibpCRClient, ica.CR) + + Eventually(ica.PodIsRunning).Should((Equal(true))) + }) + }) + + Context("delete crs", func() { + It("should delete IBPCA CR", func() { + By("deleting the first instance of IBPCA CR", func() { + result := ibpCRClient.Delete().Namespace(namespace).Resource("ibpcas").Name(ca.Name).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + }) + + By("deleting the second instance of IBPCA CR", func() { + result := ibpCRClient.Delete().Namespace(namespace).Resource("ibpcas").Name(ca2.Name).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + }) + + By("deleting the third instance of IBPCA CR", func() { + result := ibpCRClient.Delete().Namespace(namespace).Resource("ibpcas").Name(ca3.Name).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + }) + }) + }) + }) +}) + +func GetCA1() *CA { + caOverrides := &v1.ServerConfig{ + Debug: pointer.True(), + TLS: v1.ServerTLSConfig{ + CertFile: tlsCert, + KeyFile: tlsKey, + }, + CAConfig: v1.CAConfig{ + CA: v1.CAInfo{ + Name: "ca", + Certfile: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNVakNDQWZpZ0F3SUJBZ0lSQUtSTFhRQm02WUo5ODlhRGQxVmRxM2d3Q2dZSUtvWkl6ajBFQXdJd2N6RUwKTUFrR0ExVUVCaE1DVlZNeEV6QVJCZ05WQkFnVENrTmhiR2xtYjNKdWFXRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhHVEFYQmdOVkJBb1RFRzl5WnpFdVpYaGhiWEJzWlM1amIyMHhIREFhQmdOVkJBTVRFMk5oCkxtOXlaekV1WlhoaGJYQnNaUzVqYjIwd0hoY05NakF3TkRBNU1EQTBOekF3V2hjTk16QXdOREEzTURBME56QXcKV2pCek1Rc3dDUVlEVlFRR0V3SlZVekVUTUJFR0ExVUVDQk1LUTJGc2FXWnZjbTVwWVRFV01CUUdBMVVFQnhNTgpVMkZ1SUVaeVlXNWphWE5qYnpFWk1CY0dBMVVFQ2hNUWIzSm5NUzVsZUdGdGNHeGxMbU52YlRFY01Cb0dBMVVFCkF4TVRZMkV1YjNKbk1TNWxlR0Z0Y0d4bExtTnZiVEJaTUJNR0J5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEEwSUEKQkxJMENnNlFMTDZqOWdZQkZsQ3k1RTVWSC8vUHJoSUhwZ0ZNQ3VRUXJ4WUM2Y3dBbGdhS1g3Tmd4QzQrenE2dApUaU54OGtSd3h3NTRrQ2N0ZnZQdU1DMmpiVEJyTUE0R0ExVWREd0VCL3dRRUF3SUJwakFkQmdOVkhTVUVGakFVCkJnZ3JCZ0VGQlFjREFnWUlLd1lCQlFVSEF3RXdEd1lEVlIwVEFRSC9CQVV3QXdFQi96QXBCZ05WSFE0RUlnUWcKRlhXeWVGYlpMaFRHTko5MzVKQm85bFMyM284cm13SjJSQnZXaDlDMldJa3dDZ1lJS29aSXpqMEVBd0lEU0FBdwpSUUloQUxVcUU5a2F2U0NmbEV6U25ERUhIdVh1ZjR4MEhUbnU3eGtNOXArNW5PcnBBaUF1aE5NWXhxbjU5MUpLCjdWRGFPK0k0eVVWZEViNGxiRlFBZUJiR1FTdkxDdz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0==", 
+ Keyfile: "LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JR0hBZ0VBTUJNR0J5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEJHMHdhd0lCQVFRZ1FNWnEwdFY4Mjl0UUZQcS8KcSswZnNES0p6MDdnd0dpS0FUNEMwTG9qSnpDaFJBTkNBQVN5TkFvT2tDeStvL1lHQVJaUXN1Uk9WUi8vejY0UwpCNllCVEFya0VLOFdBdW5NQUpZR2lsK3pZTVF1UHM2dXJVNGpjZkpFY01jT2VKQW5MWDd6N2pBdAotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0t", + }, + }, + } + caJson, err := util.ConvertToJsonMessage(caOverrides) + Expect(err).NotTo(HaveOccurred()) + + tlscaOverrides := v1.ServerConfig{ + CAConfig: v1.CAConfig{ + CA: v1.CAInfo{ + Name: "tlsca-ca1", + }, + }, + } + tlscaJson, err := util.ConvertToJsonMessage(tlscaOverrides) + Expect(err).NotTo(HaveOccurred()) + + name := "ibpca1" + cr := ¤t.IBPCA{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: current.IBPCASpec{ + License: current.License{ + Accept: true, + }, + ImagePullSecrets: []string{"regcred"}, + // TODO:OSS + Domain: domain, + Images: ¤t.CAImages{ + CAImage: integration.CaImage, + CATag: integration.CaTag, + CAInitImage: integration.InitImage, + CAInitTag: integration.InitTag, + }, + RegistryURL: "no-registry-url", + Resources: ¤t.CAResources{ + CA: &corev1.ResourceRequirements{ + Requests: defaultRequests, + Limits: defaultLimits, + }, + }, + ConfigOverride: ¤t.ConfigOverride{ + CA: &runtime.RawExtension{Raw: *caJson}, + TLSCA: &runtime.RawExtension{Raw: *tlscaJson}, + }, + FabricVersion: integration.FabricCAVersion, + }, + } + + return &CA{ + CA: helper.CA{ + Name: name, + Namespace: namespace, + CR: cr, + CRClient: ibpCRClient, + KClient: kclient, + NativeResourcePoller: integration.NativeResourcePoller{ + Name: name, + Namespace: namespace, + Client: kclient, + }, + }, + } +} + +func GetCA2() *CA { + caOverrides := &v1.ServerConfig{ + Debug: pointer.True(), + TLS: v1.ServerTLSConfig{ + CertFile: tlsCert, + KeyFile: tlsKey, + }, + } + caJson, err := util.ConvertToJsonMessage(caOverrides) + Expect(err).NotTo(HaveOccurred()) + + name := "ibpca2" + cr := ¤t.IBPCA{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: current.IBPCASpec{ + License: current.License{ + Accept: true, + }, + ImagePullSecrets: []string{"regcred"}, + Images: ¤t.CAImages{ + CAImage: integration.CaImage, + CATag: integration.CaTag, + CAInitImage: integration.InitImage, + CAInitTag: integration.InitTag, + }, + RegistryURL: "no-registry-url", + FabricVersion: integration.FabricCAVersion, + Resources: ¤t.CAResources{ + CA: &corev1.ResourceRequirements{ + Requests: defaultRequests, + Limits: defaultLimits, + }, + }, + ConfigOverride: ¤t.ConfigOverride{ + CA: &runtime.RawExtension{Raw: *caJson}, + }, + Zone: "select", + Region: "select", + Domain: domain, + CustomNames: current.CACustomNames{ + Sqlite: "/data/fabric-ca-server.db", + }, + }, + } + cr.Name = name + + return &CA{ + CA: helper.CA{ + Name: name, + Namespace: namespace, + CR: cr, + CRClient: ibpCRClient, + KClient: kclient, + NativeResourcePoller: integration.NativeResourcePoller{ + Name: name, + Namespace: namespace, + Client: kclient, + }, + }, + } +} + +func GetCA3() *CA { + caOverrides := &v1.ServerConfig{ + Debug: pointer.True(), + TLS: v1.ServerTLSConfig{ + CertFile: tlsCert, + KeyFile: tlsKey, + }, + } + + caJson, err := util.ConvertToJsonMessage(caOverrides) + Expect(err).NotTo(HaveOccurred()) + var replicas int32 + replicas = 3 + name := "ibpca3" + cr := ¤t.IBPCA{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: current.IBPCASpec{ + Domain: domain, + ConfigOverride: ¤t.ConfigOverride{ + CA: 
&runtime.RawExtension{Raw: *caJson}, + }, + FabricVersion: integration.FabricCAVersion, + License: current.License{ + Accept: true, + }, + Replicas: &replicas, + }, + } + + return &CA{ + CA: helper.CA{ + Name: name, + Namespace: namespace, + CR: cr, + CRClient: ibpCRClient, + KClient: kclient, + NativeResourcePoller: integration.NativeResourcePoller{ + Name: name, + Namespace: namespace, + Client: kclient, + }, + }, + } +} + +func GetIntermediateCA() *CA { + caOverrides := &v1.ServerConfig{ + Debug: pointer.True(), + TLS: v1.ServerTLSConfig{ + CertFile: tlsCert, + KeyFile: tlsKey, + }, + CAConfig: v1.CAConfig{ + Intermediate: v1.IntermediateCA{ + ParentServer: v1.ParentServer{ + URL: fmt.Sprintf("https://admin:adminpw@%s-ibpca1-ca.%s", namespace, domain), + }, + TLS: v1.ClientTLSConfig{ + Enabled: pointer.True(), + CertFiles: []string{trustedRootTLSCert}, + }, + }, + }, + } + + caJson, err := util.ConvertToJsonMessage(caOverrides) + Expect(err).NotTo(HaveOccurred()) + + name := "interca" + cr := &current.IBPCA{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: current.IBPCASpec{ + License: current.License{ + Accept: true, + }, + ImagePullSecrets: []string{"regcred"}, + Domain: domain, + Images: &current.CAImages{ + CAImage: integration.CaImage, + CATag: integration.CaTag, + CAInitImage: integration.InitImage, + CAInitTag: integration.InitTag, + }, + ConfigOverride: &current.ConfigOverride{ + CA: &runtime.RawExtension{Raw: *caJson}, + }, + FabricVersion: integration.FabricCAVersion, + }, + } + + return &CA{ + CA: helper.CA{ + Name: name, + Namespace: namespace, + CR: cr, + NativeResourcePoller: integration.NativeResourcePoller{ + Name: name, + Namespace: namespace, + Client: kclient, + }, + }, + } +} diff --git a/integration/cclauncher/cclauncher_suite_test.go b/integration/cclauncher/cclauncher_suite_test.go new file mode 100644 index 00000000..26be1ba0 --- /dev/null +++ b/integration/cclauncher/cclauncher_suite_test.go @@ -0,0 +1,278 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cclauncher_test + +import ( + "encoding/base64" + "fmt" + "io/ioutil" + "net/url" + "os" + "path/filepath" + "strings" + "testing" + "time" + + . "github.com/onsi/ginkgo" + .
"github.com/onsi/gomega" + "github.com/onsi/gomega/gexec" + + "github.com/IBM-Blockchain/fabric-operator/integration" + "github.com/IBM-Blockchain/fabric-operator/integration/helper" + ibpclient "github.com/IBM-Blockchain/fabric-operator/pkg/client" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + + "k8s.io/client-go/kubernetes" +) + +func TestCclauncher(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Cclauncher Suite") +} + +const ( + ccTarFile = "gocc.tar.gz" + + FabricBinaryVersion = "2.2.3" + FabricCABinaryVersion = "1.5.1" + + peerAdminUsername = "peer-admin" + peerUsername = "peer" + ordererUsername = "orderer" + + IBPCAS = "ibpcas" + IBPPEERS = "ibppeers" + IBPORDERERS = "ibporderers" +) + +var ( + wd string // Working directory of test + namespace string + domain string + kclient *kubernetes.Clientset + ibpCRClient *ibpclient.IBPClient + colorIndex uint + testFailed bool + caHost string + tlsBytes []byte + + org1ca *helper.CA + org1peer *helper.Peer + orderer *helper.Orderer +) + +var _ = BeforeSuite(func() { + SetDefaultEventuallyTimeout(420 * time.Second) + SetDefaultEventuallyPollingInterval(time.Second) + + var err error + + domain = os.Getenv("DOMAIN") + if domain == "" { + domain = integration.TestAutomation1IngressDomain + } + + wd, err = os.Getwd() + Expect(err).NotTo(HaveOccurred()) + fmt.Fprintf(GinkgoWriter, "Working directory: %s\n", wd) + + cleanupFiles() + + cfg := &integration.Config{ + OperatorServiceAccount: "../../config/rbac/service_account.yaml", + OperatorRole: "../../config/rbac/role.yaml", + OperatorRoleBinding: "../../config/rbac/role_binding.yaml", + OperatorDeployment: "../../testdata/deploy/operator.yaml", + OrdererSecret: "../../testdata/deploy/orderer/secret.yaml", + PeerSecret: "../../testdata/deploy/peer/secret.yaml", + ConsoleTLSSecret: "../../testdata/deploy/console/tlssecret.yaml", + } + + namespace, kclient, ibpCRClient, err = integration.Setup(GinkgoWriter, cfg, "cclauncher", "") + Expect(err).NotTo(HaveOccurred()) + + downloadBinaries() + + CreateNetwork() +}) + +var _ = AfterSuite(func() { + + if strings.ToLower(os.Getenv("SAVE_TEST")) == "true" { + return + } + + integration.Cleanup(GinkgoWriter, kclient, namespace) + + cleanupFiles() +}) + +func CreateNetwork() { + By("starting CA pod", func() { + org1ca = Org1CA() + helper.CreateCA(ibpCRClient, org1ca.CR) + + Eventually(org1ca.PodIsRunning).Should((Equal(true))) + }) + + profile, err := org1ca.ConnectionProfile() + Expect(err).NotTo(HaveOccurred()) + + tlsBytes, err = util.Base64ToBytes(profile.TLS.Cert) + Expect(err).NotTo(HaveOccurred()) + + By("performing CA health check", func() { + Eventually(func() bool { + url := fmt.Sprintf("https://%s/cainfo", org1ca.Address()) + fmt.Fprintf(GinkgoWriter, "Waiting for CA health check to pass for '%s' at url: %s\n", org1ca.Name, url) + return org1ca.HealthCheck(url, tlsBytes) + }).Should(Equal(true)) + }) + + org1ca.TLSToFile(tlsBytes) + + caURL, err := url.Parse(profile.Endpoints.API) + Expect(err).NotTo(HaveOccurred()) + caHost = strings.Split(caURL.Host, ":")[0] + + By("enrolling ca admin", func() { + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, org1ca.Name, "org1ca-admin")) + sess, err := helper.StartSession(org1ca.Enroll("admin", "adminpw"), "Enroll CA Admin") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + }) + + By("registering peer identity", func() { + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, org1ca.Name, "org1ca-admin")) + sess, err := 
helper.StartSession(org1ca.Register(peerUsername, "peerpw", "peer"), "Register User") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, org1ca.Name, "org1ca-admin")) + sess, err = helper.StartSession(org1ca.Register("peer2", "peerpw2", "peer"), "Register User") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + }) + + By("registering and enrolling peer admin", func() { + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, org1ca.Name, "org1ca-admin")) + sess, err := helper.StartSession(org1ca.Register(peerAdminUsername, "peer-adminpw", "admin"), "Register Peer Admin") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, "org1peer", peerAdminUsername)) + sess, err = helper.StartSession(org1ca.Enroll(peerAdminUsername, "peer-adminpw"), "Enroll Peer Admin") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, "org1peer", peerAdminUsername+"2")) + sess, err = helper.StartSession(org1ca.Enroll(peerAdminUsername, "peer-adminpw"), "Enroll Second Peer Admin") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + }) + + By("registering orderer identity", func() { + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, org1ca.Name, "org1ca-admin")) + sess, err := helper.StartSession(org1ca.Register(ordererUsername, "ordererpw", "orderer"), "Register Orderer Identity") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, org1ca.Name, "org1ca-admin")) + sess, err = helper.StartSession(org1ca.Register("orderer2", "ordererpw2", "orderer"), "Register Orderer Identity") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + }) + + adminCertBytes, err := ioutil.ReadFile( + filepath.Join( + wd, + "org1peer", + peerAdminUsername, + "msp", + "signcerts", + "cert.pem", + ), + ) + Expect(err).NotTo(HaveOccurred()) + adminCertB64 := base64.StdEncoding.EncodeToString(adminCertBytes) + + By("starting Peer pod", func() { + org1peer = Org1Peer(profile.TLS.Cert, caHost, adminCertB64) + err = helper.CreatePeer(ibpCRClient, org1peer.CR) + Expect(err).NotTo(HaveOccurred()) + }) + + Eventually(org1peer.PodIsRunning).Should((Equal(true))) +} + +func downloadBinaries() { + os.Setenv("FABRIC_VERSION", FabricBinaryVersion) + os.Setenv("FABRIC_CA_VERSION", FabricCABinaryVersion) + sess, err := helper.StartSession( + helper.GetCommand(helper.AbsPath(wd, "../../scripts/download_binaries.sh")), + "Download Binaries", + ) + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) +} + +func cleanupFiles() { + os.RemoveAll(filepath.Join(wd, Org1CA().Name)) + os.RemoveAll(filepath.Join(wd, Org1Peer("", "", "").Name)) + os.RemoveAll(filepath.Join(wd, ccTarFile)) +} + +func Org1CA() *helper.CA { + cr := helper.Org1CACR(namespace, domain) + + return &helper.CA{ + Domain: domain, + Name: cr.Name, + Namespace: namespace, + WorkingDir: wd, + CR: cr, + CRClient: ibpCRClient, + KClient: kclient, + NativeResourcePoller: integration.NativeResourcePoller{ + Name: cr.Name, + Namespace: namespace, + Client: kclient, + }, + } +} + +func Org1Peer(tlsCert, caHost, adminCert string) *helper.Peer { + cr, err := helper.Org1PeerCR(namespace, domain, peerUsername, tlsCert, caHost, adminCert) + Expect(err).NotTo(HaveOccurred()) + + return 
&helper.Peer{ + Domain: domain, + Name: cr.Name, + Namespace: namespace, + WorkingDir: wd, + CR: cr, + CRClient: ibpCRClient, + KClient: kclient, + NativeResourcePoller: integration.NativeResourcePoller{ + Name: cr.Name, + Namespace: namespace, + Client: kclient, + }, + } +} diff --git a/integration/cclauncher/cclauncher_test.go b/integration/cclauncher/cclauncher_test.go new file mode 100644 index 00000000..5ca4e655 --- /dev/null +++ b/integration/cclauncher/cclauncher_test.go @@ -0,0 +1,98 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cclauncher_test + +import ( + "context" + "fmt" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + v2 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/peer/v2" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "sigs.k8s.io/yaml" +) + +var _ = Describe("chaincode launcher", func() { + AfterEach(func() { + // Set flag if a test falls + if CurrentGinkgoTestDescription().Failed { + testFailed = true + } + }) + + Context("V2 Peer", func() { + It("creates peer resources", func() { + By("creating deployment that contains four containers", func() { + dep, err := kclient.AppsV1().Deployments(namespace).Get(context.TODO(), org1peer.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + Expect(dep.Spec.Template.Spec.Containers).To(HaveLen(4)) + }) + + By("creating config map with external builders", func() { + cm, err := kclient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), fmt.Sprintf("%s-config", org1peer.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + v2Core := &v2.Core{} + coreBytes := cm.BinaryData["core.yaml"] + err = yaml.Unmarshal(coreBytes, v2Core) + Expect(err).NotTo(HaveOccurred()) + + extBuilder := v2.ExternalBuilder{ + Path: "/usr/local", + Name: "ibp-builder", + EnvironmentWhiteList: []string{ + "IBP_BUILDER_ENDPOINT", + "IBP_BUILDER_SHARED_DIR", + }, + PropogateEnvironment: []string{ + "IBP_BUILDER_ENDPOINT", + "IBP_BUILDER_SHARED_DIR", + "PEER_NAME", + }, + } + Expect(v2Core.Chaincode.ExternalBuilders).To(ContainElement(extBuilder)) + }) + + By("setting builders environment variables", func() { + dep, err := kclient.AppsV1().Deployments(namespace).Get(context.TODO(), org1peer.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + peerContainer := dep.Spec.Template.Spec.Containers[0] + + dirEnvVar := corev1.EnvVar{ + Name: "IBP_BUILDER_SHARED_DIR", + Value: "/cclauncher", + } + Expect(peerContainer.Env).To(ContainElement(dirEnvVar)) + + endpointEnvVar := corev1.EnvVar{ + Name: "IBP_BUILDER_ENDPOINT", + Value: "127.0.0.1:11111", + } + Expect(peerContainer.Env).To(ContainElement(endpointEnvVar)) + }) + }) + }) +}) diff --git a/integration/console/console_suite_test.go b/integration/console/console_suite_test.go new file mode 100644 index 00000000..59728c38 --- /dev/null +++ 
b/integration/console/console_suite_test.go @@ -0,0 +1,93 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package console_test + +import ( + "context" + "os" + "strings" + "testing" + + "github.com/IBM-Blockchain/fabric-operator/integration" + ibpclient "github.com/IBM-Blockchain/fabric-operator/pkg/client" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/client-go/kubernetes" +) + +func TestConsole(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Console Suite") +} + +var ( + namespace string + kclient *kubernetes.Clientset + ibpCRClient *ibpclient.IBPClient + testFailed bool +) + +var _ = BeforeSuite(func() { + var err error + + cfg := &integration.Config{ + OperatorServiceAccount: "../../config/rbac/service_account.yaml", + OperatorRole: "../../config/rbac/role.yaml", + OperatorRoleBinding: "../../config/rbac/role_binding.yaml", + OperatorDeployment: "../../testdata/deploy/operator.yaml", + OrdererSecret: "../../testdata/deploy/orderer/secret.yaml", + PeerSecret: "../../testdata/deploy/peer/secret.yaml", + ConsoleTLSSecret: "../../testdata/deploy/console/tlssecret.yaml", + } + + namespace, kclient, ibpCRClient, err = integration.Setup(GinkgoWriter, cfg, "console", "") + Expect(err).NotTo(HaveOccurred()) + + console = GetConsole() + result := ibpCRClient.Post().Namespace(namespace).Resource("ibpconsoles").Body(console.CR).Do(context.TODO()) + err = result.Error() + if !k8serrors.IsAlreadyExists(err) { + Expect(result.Error()).NotTo(HaveOccurred()) + } + + // Disabled as it consumes too many resources on the GHA executor to reliably launch console1 + //console2 = GetConsole2() + //result = ibpCRClient.Post().Namespace(namespace).Resource("ibpconsoles").Body(console2.CR).Do(context.TODO()) + //err = result.Error() + //if !k8serrors.IsAlreadyExists(err) { + // Expect(err).NotTo(HaveOccurred()) + //} + + console3 = GetConsole3() + result = ibpCRClient.Post().Namespace(namespace).Resource("ibpconsoles").Body(console3.CR).Do(context.TODO()) + err = result.Error() + if !k8serrors.IsAlreadyExists(err) { + Expect(err).NotTo(HaveOccurred()) + } +}) + +var _ = AfterSuite(func() { + + if strings.ToLower(os.Getenv("SAVE_TEST")) == "true" { + return + } + err := integration.Cleanup(GinkgoWriter, kclient, namespace) + Expect(err).NotTo(HaveOccurred()) +}) diff --git a/integration/console/console_test.go b/integration/console/console_test.go new file mode 100644 index 00000000..40a6f813 --- /dev/null +++ b/integration/console/console_test.go @@ -0,0 +1,689 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package console_test + +import ( + "context" + "encoding/json" + "math/rand" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/integration" +) + +var ( + console *Console + console2 *Console // DISABLED + console3 *Console +) + +var ( + defaultRequestsConsole = corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("30m"), + corev1.ResourceMemory: resource.MustParse("60M"), + corev1.ResourceEphemeralStorage: resource.MustParse("100Mi"), + } + + defaultLimitsConsole = corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("300m"), + corev1.ResourceMemory: resource.MustParse("600M"), + corev1.ResourceEphemeralStorage: resource.MustParse("1Gi"), + } + + defaultRequestsConfigtxlator = corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("25m"), + corev1.ResourceMemory: resource.MustParse("50M"), + corev1.ResourceEphemeralStorage: resource.MustParse("100Mi"), + } + + defaultLimitsConfigtxlator = corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("25m"), + corev1.ResourceMemory: resource.MustParse("50M"), + corev1.ResourceEphemeralStorage: resource.MustParse("1Gi"), + } + + defaultRequestsCouchdb = corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("30m"), + corev1.ResourceMemory: resource.MustParse("60M"), + corev1.ResourceEphemeralStorage: resource.MustParse("100Mi"), + } + + defaultLimitsCouchdb = corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("300m"), + corev1.ResourceMemory: resource.MustParse("600M"), + corev1.ResourceEphemeralStorage: resource.MustParse("1Gi"), + } + + defaultRequestsDeployer = corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("10m"), + corev1.ResourceMemory: resource.MustParse("20M"), + corev1.ResourceEphemeralStorage: resource.MustParse("100Mi"), + } + + defaultLimitsDeployer = corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("200M"), + corev1.ResourceEphemeralStorage: resource.MustParse("1Gi"), + } + + useTagsFlag = true +) + +var _ = Describe("Interaction between IBP-Operator and Kubernetes cluster", func() { + SetDefaultEventuallyTimeout(240 * time.Second) + SetDefaultEventuallyPollingInterval(time.Second) + + AfterEach(func() { + // Set flag if a test falls + if CurrentGinkgoTestDescription().Failed { + testFailed = true + } + }) + + Context("IBPConsole controller", func() { + Context("applying incorrectly configured third instance of IBPConsole CR", func() { + It("should set the CR status to error", func() { + Eventually(console3.pollForCRStatus).Should((Equal(current.Error))) + + crStatus := ¤t.IBPConsole{} + result := ibpCRClient.Get().Namespace(namespace).Resource("ibpconsoles").Name(console3.Name).Do(context.TODO()) + result.Into(crStatus) + + 
Expect(crStatus.Status.Message).To(ContainSubstring("Service account name not provided")) + }) + + It("should delete the third instance of IBPConsole CR", func() { + result := ibpCRClient.Delete().Namespace(namespace).Resource("ibpconsoles").Name(console3.Name).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + }) + }) + + // This test is disabled as it doesn't test anything interesting AND it consumes + // too many resources on the GHA pipeline, causing the primary test flow to starve + // and eventually time out. + PContext("applying the second instance of IBPConsole CR", func() { + var ( + err error + dep *appsv1.Deployment + ) + + It("creates a second IBPConsole custom resource", func() { + By("starting a pod", func() { + Eventually(console2.PodIsRunning).Should((Equal(true))) + }) + }) + + PIt("should find zone and region", func() { + // Wait for new deployment before querying deployment for updates + err = wait.Poll(500*time.Millisecond, 60*time.Second, func() (bool, error) { + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), console2.Name, metav1.GetOptions{}) + if dep != nil { + if dep.Status.UpdatedReplicas == 1 && dep.Status.Conditions[0].Type == appsv1.DeploymentAvailable { + return true, nil + } + } + return false, nil + }) + Expect(err).NotTo(HaveOccurred()) + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), console2.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("checking zone", func() { + Expect(console2.TestAffinityZone(dep)).To((Equal(true))) + }) + + By("checking region", func() { + Expect(console2.TestAffinityRegion(dep)).To((Equal(true))) + }) + }) + + It("should delete the second instance of IBPConsole CR", func() { + result := ibpCRClient.Delete().Namespace(namespace).Resource("ibpconsoles").Name(console2.Name).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + }) + }) + + Context("applying the first instance of IBPConsole CR", func() { + var ( + err error + dep *appsv1.Deployment + ) + + It("creates a IBPConsole custom resource", func() { + By("setting the CR status to deploying", func() { + Eventually(console.pollForCRStatus).Should(Equal(current.Deploying)) + }) + + By("creating a service", func() { + Eventually(console.ServiceExists).Should((Equal(true))) + }) + + By("creating a pvc", func() { + Eventually(console.PVCExists).Should((Equal(true))) + }) + + By("creating a configmap", func() { + Eventually(console.ConfigMapExists).Should((Equal(true))) + }) + + By("starting a ingress", func() { + Eventually(console.IngressExists).Should((Equal(true))) + }) + + By("creating a deployment", func() { + Eventually(console.DeploymentExists).Should((Equal(true))) + }) + + By("starting a pod", func() { + Eventually(console.PodIsRunning).Should((Equal(true))) + }) + + By("setting the CR status to deployed when pod is running", func() { + Eventually(console.pollForCRStatus).Should((Equal(current.Deployed))) + }) + }) + + It("should not find zone and region", func() { + // Wait for new deployment before querying deployment for updates + err = wait.Poll(500*time.Millisecond, 60*time.Second, func() (bool, error) { + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), console.Name, metav1.GetOptions{}) + if dep != nil { + if dep.Status.UpdatedReplicas == 1 && dep.Status.Conditions[0].Type == appsv1.DeploymentAvailable { + return true, nil + } + } + return false, nil + }) + Expect(err).NotTo(HaveOccurred()) + dep, err = 
kclient.AppsV1().Deployments(namespace).Get(context.TODO(), console.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("checking zone", func() { + Expect(console.TestAffinityZone(dep)).Should((Equal(false))) + }) + + By("checking region", func() { + Expect(console.TestAffinityRegion(dep)).Should((Equal(false))) + }) + }) + + When("the custom resource is updated", func() { + var ( + err error + dep *appsv1.Deployment + newResourceRequestsConsole corev1.ResourceList + newResourceLimitsConsole corev1.ResourceList + newResourceRequestsConfigtxlator corev1.ResourceList + newResourceLimitsConfigtxlator corev1.ResourceList + newResourceRequestsCouchdb corev1.ResourceList + newResourceLimitsCouchdb corev1.ResourceList + newResourceRequestsDeployer corev1.ResourceList + newResourceLimitsDeployer corev1.ResourceList + ) + + BeforeEach(func() { + newResourceRequestsConsole = map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("25m"), + corev1.ResourceMemory: resource.MustParse("50M"), + corev1.ResourceEphemeralStorage: resource.MustParse("100M"), + } + newResourceLimitsConsole = map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("250m"), + corev1.ResourceMemory: resource.MustParse("500M"), + corev1.ResourceEphemeralStorage: resource.MustParse("1G"), + } + + newResourceRequestsConfigtxlator = map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("30m"), + corev1.ResourceMemory: resource.MustParse("60M"), + corev1.ResourceEphemeralStorage: resource.MustParse("100M"), + } + newResourceLimitsConfigtxlator = map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("30m"), + corev1.ResourceMemory: resource.MustParse("60M"), + corev1.ResourceEphemeralStorage: resource.MustParse("1G"), + } + + newResourceRequestsCouchdb = map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("35m"), + corev1.ResourceMemory: resource.MustParse("70M"), + corev1.ResourceEphemeralStorage: resource.MustParse("100M"), + } + newResourceLimitsCouchdb = map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("350m"), + corev1.ResourceMemory: resource.MustParse("700M"), + corev1.ResourceEphemeralStorage: resource.MustParse("1G"), + } + + newResourceRequestsDeployer = map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("9m"), + corev1.ResourceMemory: resource.MustParse("18M"), + corev1.ResourceEphemeralStorage: resource.MustParse("100M"), + } + newResourceLimitsDeployer = map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("90m"), + corev1.ResourceMemory: resource.MustParse("180M"), + corev1.ResourceEphemeralStorage: resource.MustParse("1G"), + } + + Eventually(console.DeploymentExists).Should((Equal(true))) + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), console.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + }) + + It("updates the instance of IBPConsole if resources are updated in CR", func() { + consoleResources := dep.Spec.Template.Spec.Containers[0].Resources + Expect(consoleResources.Requests).To(Equal(defaultRequestsConsole)) + Expect(consoleResources.Limits).To(Equal(defaultLimitsConsole)) + + deployerResources := dep.Spec.Template.Spec.Containers[1].Resources + Expect(deployerResources.Requests).To(Equal(defaultRequestsDeployer)) + Expect(deployerResources.Limits).To(Equal(defaultLimitsDeployer)) + + configtxResources := 
dep.Spec.Template.Spec.Containers[2].Resources + Expect(configtxResources.Requests).To(Equal(defaultRequestsConfigtxlator)) + Expect(configtxResources.Limits).To(Equal(defaultLimitsConfigtxlator)) + + couchdbResources := dep.Spec.Template.Spec.Containers[3].Resources + Expect(couchdbResources.Requests).To(Equal(defaultRequestsCouchdb)) + Expect(couchdbResources.Limits).To(Equal(defaultLimitsCouchdb)) + + console.CR.Spec.Resources = ¤t.ConsoleResources{ + Console: &corev1.ResourceRequirements{ + Requests: newResourceRequestsConsole, + Limits: newResourceLimitsConsole, + }, + Configtxlator: &corev1.ResourceRequirements{ + Requests: newResourceRequestsConfigtxlator, + Limits: newResourceLimitsConfigtxlator, + }, + CouchDB: &corev1.ResourceRequirements{ + Requests: newResourceRequestsCouchdb, + Limits: newResourceLimitsCouchdb, + }, + Deployer: &corev1.ResourceRequirements{ + Requests: newResourceRequestsDeployer, + Limits: newResourceLimitsDeployer, + }, + } + console.CR.Spec.Password = "" + bytes, err := json.Marshal(console.CR) + Expect(err).NotTo(HaveOccurred()) + + result := ibpCRClient.Patch(types.MergePatchType).Namespace(namespace).Resource("ibpconsoles").Name(console.Name).Body(bytes).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + + // Wait for new deployment before querying deployment for updates + err = wait.Poll(500*time.Millisecond, 60*time.Second, func() (bool, error) { + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), console.Name, metav1.GetOptions{}) + if dep != nil { + if dep.Status.UpdatedReplicas == 1 && dep.Status.Conditions[0].Type == appsv1.DeploymentAvailable { + if dep.Spec.Template.Spec.Containers[0].Resources.Requests.Cpu().MilliValue() == newResourceRequestsConsole.Cpu().MilliValue() { + return true, nil + } + } + } + return false, nil + }) + Expect(err).NotTo(HaveOccurred()) + + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), console.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + updatedConsoleResources := dep.Spec.Template.Spec.Containers[0].Resources + Expect(updatedConsoleResources.Requests).To(Equal(newResourceRequestsConsole)) + Expect(updatedConsoleResources.Limits).To(Equal(newResourceLimitsConsole)) + + updatedDeployerResources := dep.Spec.Template.Spec.Containers[1].Resources + Expect(updatedDeployerResources.Requests).To(Equal(newResourceRequestsDeployer)) + Expect(updatedDeployerResources.Limits).To(Equal(newResourceLimitsDeployer)) + + updatedConfigtxResources := dep.Spec.Template.Spec.Containers[2].Resources + Expect(updatedConfigtxResources.Requests).To(Equal(newResourceRequestsConfigtxlator)) + Expect(updatedConfigtxResources.Limits).To(Equal(newResourceLimitsConfigtxlator)) + + updatedCouchDBResources := dep.Spec.Template.Spec.Containers[3].Resources + Expect(updatedCouchDBResources.Requests).To(Equal(newResourceRequestsCouchdb)) + Expect(updatedCouchDBResources.Limits).To(Equal(newResourceLimitsCouchdb)) + }) + }) + + When("a deployment managed by operator is manually edited", func() { + var ( + err error + dep *appsv1.Deployment + ) + + BeforeEach(func() { + Eventually(console.DeploymentExists).Should((Equal(true))) + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), console.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + }) + + It("restores states", func() { + origRequests := dep.Spec.Template.Spec.Containers[0].Resources.Requests + dep.Spec.Template.Spec.Containers[0].Resources.Requests = 
map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("107m"), + corev1.ResourceMemory: resource.MustParse("107M"), + } + + depBytes, err := json.Marshal(dep) + Expect(err).NotTo(HaveOccurred()) + + _, err = kclient.AppsV1().Deployments(namespace).Patch(context.TODO(), console.Name, types.MergePatchType, depBytes, metav1.PatchOptions{}) + Expect(err).NotTo(HaveOccurred()) + + // Wait for new deployment before querying deployment for updates + err = wait.Poll(500*time.Millisecond, 60*time.Second, func() (bool, error) { + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), console.Name, metav1.GetOptions{}) + if dep != nil { + if dep.Status.UpdatedReplicas == 1 && dep.Status.Conditions[0].Type == appsv1.DeploymentAvailable { + if dep.Spec.Template.Spec.Containers[0].Resources.Requests.Cpu().MilliValue() == origRequests.Cpu().MilliValue() { + return true, nil + } + } + } + return false, nil + }) + Expect(err).NotTo(HaveOccurred()) + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), console.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + Expect(dep.Spec.Template.Spec.Containers[0].Resources.Requests).To(Equal(origRequests)) + }) + }) + + It("should delete the first instance of IBPConsole CR", func() { + result := ibpCRClient.Delete().Namespace(namespace).Resource("ibpconsoles").Name(console.Name).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + }) + }) + }) +}) + +func shuf(min, max int) int32 { + rand.Seed(time.Now().UnixNano()) + return int32(rand.Intn(max-min+1) + min) +} + +func GetConsole() *Console { + consolePort := shuf(30000, 32768) + proxyPort := shuf(30000, 32768) + + name := "ibpconsole1" + cr := ¤t.IBPConsole{ + Spec: current.IBPConsoleSpec{ + License: current.License{ + Accept: true, + }, + ConnectionString: "http://localhost:5984", + ServiceAccountName: "ibpconsole1", + NetworkInfo: ¤t.NetworkInfo{ + Domain: integration.TestAutomation1IngressDomain, + ConsolePort: consolePort, + ProxyPort: proxyPort, + }, + Email: "admin@ibm.com", + Password: "cGFzc3dvcmQ=", + Resources: ¤t.ConsoleResources{ + Console: &corev1.ResourceRequirements{ + Requests: defaultRequestsConsole, + Limits: defaultLimitsConsole, + }, + Configtxlator: &corev1.ResourceRequirements{ + Requests: defaultRequestsConfigtxlator, + Limits: defaultLimitsConfigtxlator, + }, + CouchDB: &corev1.ResourceRequirements{ + Requests: defaultRequestsCouchdb, + Limits: defaultLimitsCouchdb, + }, + Deployer: &corev1.ResourceRequirements{ + Requests: defaultRequestsDeployer, + Limits: defaultLimitsDeployer, + }, + }, + ImagePullSecrets: []string{"regcred"}, + Images: ¤t.ConsoleImages{ + ConfigtxlatorImage: integration.ConfigtxlatorImage, + ConfigtxlatorTag: integration.ConfigtxlatorTag, + ConsoleImage: integration.ConsoleImage, + ConsoleTag: integration.ConsoleTag, + ConsoleInitImage: integration.InitImage, + ConsoleInitTag: integration.InitTag, + CouchDBImage: integration.CouchdbImage, + CouchDBTag: integration.CouchdbTag, + DeployerImage: integration.DeployerImage, + DeployerTag: integration.DeployerTag, + }, + Versions: ¤t.Versions{ + CA: map[string]current.VersionCA{ + integration.FabricCAVersion: current.VersionCA{ + Default: true, + Version: integration.FabricCAVersion, + Image: current.CAImages{ + CAInitImage: integration.InitImage, + CAInitTag: integration.InitTag, + CAImage: integration.CaImage, + CATag: integration.CaTag, + }, + }, + }, + Peer: map[string]current.VersionPeer{ + integration.FabricVersion: 
current.VersionPeer{ + Default: true, + Version: integration.FabricVersion, + Image: current.PeerImages{ + PeerInitImage: integration.InitImage, + PeerInitTag: integration.InitTag, + PeerImage: integration.PeerImage, + PeerTag: integration.PeerTag, + GRPCWebImage: integration.GrpcwebImage, + GRPCWebTag: integration.GrpcwebTag, + CouchDBImage: integration.CouchdbImage, + CouchDBTag: integration.CouchdbTag, + }, + }, + }, + Orderer: map[string]current.VersionOrderer{ + integration.FabricVersion: current.VersionOrderer{ + Default: true, + Version: integration.FabricVersion, + Image: current.OrdererImages{ + OrdererInitImage: integration.InitImage, + OrdererInitTag: integration.InitTag, + OrdererImage: integration.OrdererImage, + OrdererTag: integration.OrdererTag, + GRPCWebImage: integration.GrpcwebImage, + GRPCWebTag: integration.GrpcwebTag, + }, + }, + }, + }, + UseTags: &useTagsFlag, + }, + } + cr.Name = name + + return &Console{ + Name: name, + CR: cr, + NativeResourcePoller: integration.NativeResourcePoller{ + Name: name, + Namespace: namespace, + Client: kclient, + }, + } +} + +// DISABLED +func GetConsole2() *Console { + consolePort := shuf(30000, 32768) + proxyPort := shuf(30000, 32768) + + name := "ibpconsole2" + cr := ¤t.IBPConsole{ + Spec: current.IBPConsoleSpec{ + License: current.License{ + Accept: true, + }, + ConnectionString: "http://localhost:5984", + ServiceAccountName: "ibpconsole1", + NetworkInfo: ¤t.NetworkInfo{ + Domain: integration.TestAutomation1IngressDomain, + ConsolePort: consolePort, + ProxyPort: proxyPort, + }, + Email: "admin@ibm.com", + Password: "cGFzc3dvcmQ=", + Zone: "select", + Region: "select", + ImagePullSecrets: []string{"regcred"}, + Images: ¤t.ConsoleImages{ + ConfigtxlatorImage: integration.ConfigtxlatorImage, + ConfigtxlatorTag: integration.ConfigtxlatorTag, + ConsoleImage: integration.ConsoleImage, + ConsoleTag: integration.ConsoleTag, + ConsoleInitImage: integration.InitImage, + ConsoleInitTag: integration.InitTag, + CouchDBImage: integration.CouchdbImage, + CouchDBTag: integration.CouchdbTag, + DeployerImage: integration.DeployerImage, + DeployerTag: integration.DeployerTag, + }, + Versions: ¤t.Versions{ + CA: map[string]current.VersionCA{ + integration.FabricCAVersion: current.VersionCA{ + Default: true, + Version: integration.FabricCAVersion, + Image: current.CAImages{ + CAInitImage: integration.InitImage, + CAInitTag: integration.InitTag, + CAImage: integration.CaImage, + CATag: integration.CaTag, + }, + }, + }, + Peer: map[string]current.VersionPeer{ + integration.FabricVersion: current.VersionPeer{ + Default: true, + Version: integration.FabricVersion, + Image: current.PeerImages{ + PeerInitImage: integration.InitImage, + PeerInitTag: integration.InitTag, + PeerImage: integration.PeerImage, + PeerTag: integration.PeerTag, + GRPCWebImage: integration.GrpcwebImage, + GRPCWebTag: integration.GrpcwebTag, + CouchDBImage: integration.CouchdbImage, + CouchDBTag: integration.CouchdbTag, + }, + }, + }, + Orderer: map[string]current.VersionOrderer{ + integration.FabricVersion: current.VersionOrderer{ + Default: true, + Version: integration.FabricVersion, + Image: current.OrdererImages{ + OrdererInitImage: integration.InitImage, + OrdererInitTag: integration.InitTag, + OrdererImage: integration.OrdererImage, + OrdererTag: integration.OrdererTag, + GRPCWebImage: integration.GrpcwebImage, + GRPCWebTag: integration.GrpcwebTag, + }, + }, + }, + }, + UseTags: &useTagsFlag, + }, + } + cr.Name = name + + return &Console{ + Name: name, + CR: cr, + 
NativeResourcePoller: integration.NativeResourcePoller{ + Name: name, + Namespace: namespace, + Client: kclient, + }, + } +} + +func GetConsole3() *Console { + consolePort := shuf(30000, 32768) + proxyPort := shuf(30000, 32768) + + name := "ibpconsole3" + cr := ¤t.IBPConsole{ + Spec: current.IBPConsoleSpec{ + License: current.License{ + Accept: true, + }, + ServiceAccountName: "", // Will cause error + NetworkInfo: ¤t.NetworkInfo{ + Domain: integration.TestAutomation1IngressDomain, + ConsolePort: consolePort, + ProxyPort: proxyPort, + }, + Images: ¤t.ConsoleImages{ + CouchDBImage: integration.CouchdbImage, + CouchDBTag: integration.CouchdbTag, + }, + UseTags: &useTagsFlag, + }, + } + cr.Name = name + + return &Console{ + Name: name, + CR: cr, + NativeResourcePoller: integration.NativeResourcePoller{ + Name: name, + Namespace: namespace, + Client: kclient, + }, + } +} + +type Console struct { + Name string + CR *current.IBPConsole + integration.NativeResourcePoller +} + +func (console *Console) pollForCRStatus() current.IBPCRStatusType { + crStatus := ¤t.IBPConsole{} + + result := ibpCRClient.Get().Namespace(namespace).Resource("ibpconsoles").Name(console.Name).Do(context.TODO()) + result.Into(crStatus) + + return crStatus.Status.Type +} diff --git a/integration/e2ev2/.gitignore b/integration/e2ev2/.gitignore new file mode 100644 index 00000000..9e77f989 --- /dev/null +++ b/integration/e2ev2/.gitignore @@ -0,0 +1,2 @@ +org1ca/ +org1peer/ diff --git a/integration/e2ev2/ca_test.go b/integration/e2ev2/ca_test.go new file mode 100644 index 00000000..1bc9a05e --- /dev/null +++ b/integration/e2ev2/ca_test.go @@ -0,0 +1,90 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package e2ev2_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/integration" + "github.com/IBM-Blockchain/fabric-operator/integration/helper" +) + +const ( + IBPCAS = "ibpcas" +) + +var ( + org1ca *helper.CA +) + +var _ = Describe("ca", func() { + BeforeEach(func() { + Eventually(org1ca.PodIsRunning).Should((Equal(true))) + + ClearOperatorConfig() + }) + + AfterEach(func() { + // Set flag if a test falls + if CurrentGinkgoTestDescription().Failed { + testFailed = true + } + }) + + // Marked as Pending because slow clusters makes this test flaky as the CR + // doesn't get created, so there is a small window of time to catch + // its error status before it disappears. 
+ PContext("validate CR name when created", func() { + BeforeEach(func() { + Eventually(org1peer.PodIsRunning).Should((Equal(true))) + }) + + When("creating a CA with a pre-existing CR name", func() { + It("puts CA in error phase", func() { + org1ca2 := Org1CA2() + helper.CreateCA(ibpCRClient, org1ca2.CR) + + Eventually(org1ca2.PollForCRStatus).Should((Equal(current.Error))) + }) + }) + }) +}) + +func Org1CA2() *helper.CA { + cr := helper.Org1CACR(namespace, domain) + // Set CR name to existing cr name for test + cr.Name = "org1peer" + + return &helper.CA{ + Domain: domain, + Name: cr.Name, + Namespace: namespace, + WorkingDir: wd, + CR: cr, + CRClient: ibpCRClient, + KClient: kclient, + NativeResourcePoller: integration.NativeResourcePoller{ + Name: cr.Name, + Namespace: namespace, + Client: kclient, + }, + } +} diff --git a/integration/e2ev2/config.yaml b/integration/e2ev2/config.yaml new file mode 100644 index 00000000..b07ede88 --- /dev/null +++ b/integration/e2ev2/config.yaml @@ -0,0 +1,33 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +NodeOUs: + Enable: true + ClientOUIdentifier: + # Certificate: cacerts/cclauncher-org1ca-ca-ibpv2-test-cluster-us-south-containers-appdomain-cloud-7054.pem + OrganizationalUnitIdentifier: OU_client + PeerOUIdentifier: + # Certificate: cacerts/cclauncher-org1ca-ca-ibpv2-test-cluster-us-south-containers-appdomain-cloud-7054.pem + OrganizationalUnitIdentifier: OU_peer + AdminOUIdentifier: + # Certificate: cacerts/cclauncher-org1ca-ca-ibpv2-test-cluster-us-south-containers-appdomain-cloud-7054.pem + OrganizationalUnitIdentifier: OU_admin + OrdererOUIdentifier: + # Certificate: cacerts/cclauncher-org1ca-ca-ibpv2-test-cluster-us-south-containers-appdomain-cloud-7054.pem + OrganizationalUnitIdentifier: OU_orderer diff --git a/integration/e2ev2/console_test.go b/integration/e2ev2/console_test.go new file mode 100644 index 00000000..72be55e4 --- /dev/null +++ b/integration/e2ev2/console_test.go @@ -0,0 +1,235 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package e2ev2_test + +import ( + "context" + "encoding/json" + "math/rand" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/integration" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var ( + console *Console +) + +var _ = Describe("console", func() { + BeforeEach(func() { + Eventually(console.PodIsRunning).Should((Equal(true))) + }) + + AfterEach(func() { + // Set flag if a test falls + if CurrentGinkgoTestDescription().Failed { + testFailed = true + } + }) + + Context("trigger actions", func() { + var ( + podName string + ibpconsole *current.IBPConsole + ) + + BeforeEach(func() { + Eventually(func() int { + return len(console.GetPods()) + }).Should(Equal(1)) + + podName = console.GetPods()[0].Name + + result := ibpCRClient.Get().Namespace(namespace).Resource("ibpconsoles").Name(console.Name).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + + ibpconsole = ¤t.IBPConsole{} + result.Into(ibpconsole) + }) + + When("spec has restart flag set to true", func() { + BeforeEach(func() { + ibpconsole.Spec.Action.Restart = true + }) + + It("performs restart action", func() { + bytes, err := json.Marshal(ibpconsole) + Expect(err).NotTo(HaveOccurred()) + + result := ibpCRClient.Put().Namespace(namespace).Resource("ibpconsoles").Name(console.Name).Body(bytes).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + + Eventually(console.PodIsRunning).Should((Equal(true))) + + By("restarting console pod", func() { + Eventually(func() bool { + pods := console.GetPods() + if len(pods) == 0 { + return false + } + + newPodName := pods[0].Name + if newPodName != podName { + return true + } + + return false + }).Should(Equal(true)) + }) + + By("setting restart flag back to false after restart", func() { + Eventually(func() bool { + result := ibpCRClient.Get().Namespace(namespace).Resource("ibpconsoles").Name(console.Name).Do(context.TODO()) + console := ¤t.IBPConsole{} + result.Into(console) + + return console.Spec.Action.Restart + }).Should(Equal(false)) + }) + }) + }) + }) + +}) + +func CreateConsole(console *Console) { + result := ibpCRClient.Post().Namespace(namespace).Resource("ibpconsoles").Body(console.CR).Do(context.TODO()) + err := result.Error() + if !k8serrors.IsAlreadyExists(err) { + Expect(result.Error()).NotTo(HaveOccurred()) + } +} + +func GetConsole() *Console { + consolePort := randNum(30000, 32768) + proxyPort := randNum(30000, 32768) + + useTagsFlag := true + + cr := ¤t.IBPConsole{ + TypeMeta: metav1.TypeMeta{ + Kind: "IBPConsole", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "ibpconsole", + Namespace: namespace, + }, + Spec: current.IBPConsoleSpec{ + License: current.License{ + Accept: true, + }, + ConnectionString: "http://localhost:5984", + ServiceAccountName: "ibpconsole1", + NetworkInfo: ¤t.NetworkInfo{ + Domain: "test-domain", + ConsolePort: consolePort, + ProxyPort: proxyPort, + }, + Email: "admin@ibm.com", + Password: "cGFzc3dvcmQ=", + Zone: "select", + Region: "select", + ImagePullSecrets: []string{"regcred"}, + Images: ¤t.ConsoleImages{ + ConfigtxlatorImage: integration.ConfigtxlatorImage, + ConfigtxlatorTag: integration.ConfigtxlatorTag, + ConsoleImage: integration.ConsoleImage, + ConsoleTag: integration.ConsoleTag, + ConsoleInitImage: integration.InitImage, + ConsoleInitTag: integration.InitTag, + CouchDBImage: integration.CouchdbImage, + CouchDBTag: integration.CouchdbTag, + DeployerImage: integration.DeployerImage, + DeployerTag: integration.DeployerTag, + }, + Versions: 
¤t.Versions{ + CA: map[string]current.VersionCA{ + integration.FabricCAVersion: current.VersionCA{ + Default: true, + Version: integration.FabricCAVersion, + Image: current.CAImages{ + CAInitImage: integration.InitImage, + CAInitTag: integration.InitTag, + CAImage: integration.CaImage, + CATag: integration.CaTag, + }, + }, + }, + Peer: map[string]current.VersionPeer{ + integration.FabricVersion: current.VersionPeer{ + Default: true, + Version: integration.FabricVersion, + Image: current.PeerImages{ + PeerInitImage: integration.InitImage, + PeerInitTag: integration.InitTag, + PeerImage: integration.PeerImage, + PeerTag: integration.PeerTag, + GRPCWebImage: integration.GrpcwebImage, + GRPCWebTag: integration.GrpcwebTag, + CouchDBImage: integration.CouchdbImage, + CouchDBTag: integration.CouchdbTag, + }, + }, + }, + Orderer: map[string]current.VersionOrderer{ + integration.FabricVersion: current.VersionOrderer{ + Default: true, + Version: integration.FabricVersion, + Image: current.OrdererImages{ + OrdererInitImage: integration.InitImage, + OrdererInitTag: integration.InitTag, + OrdererImage: integration.OrdererImage, + OrdererTag: integration.OrdererTag, + GRPCWebImage: integration.GrpcwebImage, + GRPCWebTag: integration.GrpcwebTag, + }, + }, + }, + }, + UseTags: &useTagsFlag, + }, + } + + return &Console{ + Name: cr.Name, + CR: cr, + NativeResourcePoller: integration.NativeResourcePoller{ + Name: cr.Name, + Namespace: namespace, + Client: kclient, + }, + } +} + +type Console struct { + Name string + CR *current.IBPConsole + integration.NativeResourcePoller +} + +func randNum(min, max int) int32 { + rand.Seed(time.Now().UnixNano()) + return int32(rand.Intn(max-min+1) + min) +} diff --git a/integration/e2ev2/e2ev2_suite_test.go b/integration/e2ev2/e2ev2_suite_test.go new file mode 100644 index 00000000..d5d64218 --- /dev/null +++ b/integration/e2ev2/e2ev2_suite_test.go @@ -0,0 +1,252 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package e2ev2_test + +import ( + "encoding/base64" + "fmt" + "io/ioutil" + "net/url" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/IBM-Blockchain/fabric-operator/integration" + "github.com/IBM-Blockchain/fabric-operator/integration/helper" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/onsi/gomega/gexec" + + ibpclient "github.com/IBM-Blockchain/fabric-operator/pkg/client" + + "k8s.io/client-go/kubernetes" +) + +func TestE2ev2(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "E2ev2 Suite") +} + +const ( + ccTarFile = "gocc.tar.gz" + + FabricBinaryVersion = "2.2.3" + FabricCABinaryVersion = "1.5.1" + + peerAdminUsername = "peer-admin" + peerUsername = "peer" + ordererUsername = "orderer" +) + +var ( + wd string // Working directory of test + namespace string + domain string + kclient *kubernetes.Clientset + ibpCRClient *ibpclient.IBPClient + colorIndex uint + testFailed bool + caHost string + tlsBytes []byte +) + +var _ = BeforeSuite(func() { + SetDefaultEventuallyTimeout(420 * time.Second) + SetDefaultEventuallyPollingInterval(time.Second) + + var err error + + domain = os.Getenv("DOMAIN") + if domain == "" { + domain = integration.TestAutomation1IngressDomain + } + + wd, err = os.Getwd() + Expect(err).NotTo(HaveOccurred()) + fmt.Fprintf(GinkgoWriter, "Working directory: %s\n", wd) + + cleanupFiles() + + cfg := &integration.Config{ + OperatorServiceAccount: "../../config/rbac/service_account.yaml", + OperatorRole: "../../config/rbac/role.yaml", + OperatorRoleBinding: "../../config/rbac/role_binding.yaml", + OperatorDeployment: "../../testdata/deploy/operator.yaml", + OrdererSecret: "../../testdata/deploy/orderer/secret.yaml", + PeerSecret: "../../testdata/deploy/peer/secret.yaml", + ConsoleTLSSecret: "../../testdata/deploy/console/tlssecret.yaml", + } + + namespace, kclient, ibpCRClient, err = integration.Setup(GinkgoWriter, cfg, "e2ev2", "") + Expect(err).NotTo(HaveOccurred()) + + downloadBinaries() + + SetupConsole() + CreateNetwork() +}) + +var _ = AfterSuite(func() { + + if strings.ToLower(os.Getenv("SAVE_TEST")) == "true" { + return + } + + integration.Cleanup(GinkgoWriter, kclient, namespace) +}) + +func SetupConsole() { + console = GetConsole() + CreateConsole(console) +} + +func CreateNetwork() { + By("starting CA pod", func() { + org1ca = Org1CA() + helper.CreateCA(ibpCRClient, org1ca.CR) + + Eventually(org1ca.PodIsRunning).Should((Equal(true))) + }) + + profile, err := org1ca.ConnectionProfile() + Expect(err).NotTo(HaveOccurred()) + + tlsBytes, err = util.Base64ToBytes(profile.TLS.Cert) + Expect(err).NotTo(HaveOccurred()) + + By("performing CA health check", func() { + Eventually(func() bool { + url := fmt.Sprintf("https://%s/cainfo", org1ca.Address()) + fmt.Fprintf(GinkgoWriter, "Waiting for CA health check to pass for '%s' at url: %s\n", org1ca.Name, url) + return org1ca.HealthCheck(url, tlsBytes) + }).Should(Equal(true)) + }) + + org1ca.TLSToFile(tlsBytes) + + caURL, err := url.Parse(profile.Endpoints.API) + Expect(err).NotTo(HaveOccurred()) + caHost = strings.Split(caURL.Host, ":")[0] + + By("enrolling ca admin", func() { + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, org1ca.Name, "org1ca-admin")) + sess, err := helper.StartSession(org1ca.Enroll("admin", "adminpw"), "Enroll CA Admin") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + }) + + By("registering peer identity", func() { + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, org1ca.Name, "org1ca-admin")) + sess, err := helper.StartSession(org1ca.Register(peerUsername, "peerpw", "peer"), "Register User") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, org1ca.Name, "org1ca-admin")) + sess, err = 
helper.StartSession(org1ca.Register("peer2", "peerpw2", "peer"), "Register User") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + }) + + By("registering and enrolling peer admin", func() { + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, org1ca.Name, "org1ca-admin")) + sess, err := helper.StartSession(org1ca.Register(peerAdminUsername, "peer-adminpw", "admin"), "Register Peer Admin") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, "org1peer", peerAdminUsername)) + sess, err = helper.StartSession(org1ca.Enroll(peerAdminUsername, "peer-adminpw"), "Enroll Peer Admin") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, "org1peer", peerAdminUsername+"2")) + sess, err = helper.StartSession(org1ca.Enroll(peerAdminUsername, "peer-adminpw"), "Enroll Second Peer Admin") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + }) + + By("registering orderer identity", func() { + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, org1ca.Name, "org1ca-admin")) + sess, err := helper.StartSession(org1ca.Register(ordererUsername, "ordererpw", "orderer"), "Register Orderer Identity") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, org1ca.Name, "org1ca-admin")) + sess, err = helper.StartSession(org1ca.Register("orderer2", "ordererpw2", "orderer"), "Register Orderer Identity") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + }) + + adminCertBytes, err := ioutil.ReadFile( + filepath.Join( + wd, + "org1peer", + peerAdminUsername, + "msp", + "signcerts", + "cert.pem", + ), + ) + Expect(err).NotTo(HaveOccurred()) + adminCertB64 := base64.StdEncoding.EncodeToString(adminCertBytes) + tlsCert := base64.StdEncoding.EncodeToString(tlsBytes) + + By("starting Peer pod", func() { + org1peer = Org1Peer(tlsCert, caHost, adminCertB64) + err = helper.CreatePeer(ibpCRClient, org1peer.CR) + Expect(err).NotTo(HaveOccurred()) + }) + + By("starting Orderer pod", func() { + orderer = GetOrderer(tlsCert, caHost) + err = helper.CreateOrderer(ibpCRClient, orderer.CR) + Expect(err).NotTo(HaveOccurred()) + }) + + Eventually(org1peer.PodIsRunning).Should((Equal(true))) + Eventually(orderer.Nodes[0].PodIsRunning).Should((Equal(true))) +} + +func downloadBinaries() { + os.Setenv("FABRIC_VERSION", FabricBinaryVersion) + os.Setenv("FABRIC_CA_VERSION", FabricCABinaryVersion) + sess, err := helper.StartSession( + helper.GetCommand(helper.AbsPath(wd, "../../scripts/download_binaries.sh")), + "Download Binaries", + ) + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) +} + +func cleanupFiles() { + os.RemoveAll(filepath.Join(wd, Org1CA().Name)) + os.RemoveAll(filepath.Join(wd, Org1Peer("", "", "").Name)) + os.RemoveAll(filepath.Join(wd, GetOrderer("", "").Nodes[0].Name)) + os.RemoveAll(filepath.Join(wd, ccTarFile)) +} + +func CopyFile(from string, to string) { + bytes, err := ioutil.ReadFile(from) + Expect(err).NotTo(HaveOccurred()) + + err = ioutil.WriteFile(to, bytes, 0644) + Expect(err).NotTo(HaveOccurred()) +} diff --git a/integration/e2ev2/e2ev2_test.go b/integration/e2ev2/e2ev2_test.go new file mode 100644 index 00000000..9d4818b9 --- /dev/null +++ b/integration/e2ev2/e2ev2_test.go @@ -0,0 +1,203 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + 
* + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package e2ev2_test + +import ( + "context" + "encoding/json" + "fmt" + "os" + "path/filepath" + + . "github.com/onsi/gomega" + + "github.com/IBM-Blockchain/fabric-operator/integration" + "github.com/IBM-Blockchain/fabric-operator/integration/helper" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common" + "github.com/onsi/gomega/gexec" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var ( + org1peer *helper.Peer + orderer *helper.Orderer + peeradmin *PeerAdmin +) + +type PeerAdmin struct { + Envs []string +} + +func NewPeerAdminSession(org1peer *helper.Peer, tlsRootCertPath string, address string) *PeerAdmin { + peerHome := filepath.Join(wd, org1peer.Name) + + CopyFile("./config/core.yaml", filepath.Join(peerHome, "core.yaml")) + CopyFile("./config.yaml", filepath.Join(peerHome, peerAdminUsername, "/msp/config.yaml")) + + envs := []string{ + fmt.Sprintf("FABRIC_CFG_PATH=%s", peerHome), + fmt.Sprintf("CORE_PEER_TLS_ENABLED=%s", "true"), + fmt.Sprintf("CORE_PEER_LOCALMSPID=%s", org1peer.CR.Spec.MSPID), + fmt.Sprintf("CORE_PEER_TLS_ROOTCERT_FILE=%s", tlsRootCertPath), + fmt.Sprintf("CORE_PEER_MSPCONFIGPATH=%s", filepath.Join(wd, org1peer.Name, peerAdminUsername, "msp")), + fmt.Sprintf("CORE_PEER_ADDRESS=%s", address), + } + + envs = append(envs, os.Environ()...) + + return &PeerAdmin{ + Envs: envs, + } +} + +type Chaincode struct { + Path string + Lang string + Label string + OutputFile string +} + +func (p *PeerAdmin) PackageChaincode(c Chaincode) { + args := []string{ + "lifecycle", "chaincode", "package", + "--path", c.Path, + "--lang", c.Lang, + "--label", c.Label, + c.OutputFile, + } + + cmd := helper.GetCommand(helper.AbsPath(wd, "bin/peer"), args...) + cmd.Env = p.Envs + + sess, err := helper.StartSession(cmd, "Package Chaincode") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) +} + +func (p *PeerAdmin) InstallChaincode(packageFile string) { + args := []string{ + "lifecycle", "chaincode", "install", + packageFile, + } + + cmd := helper.GetCommand(helper.AbsPath(wd, "bin/peer"), args...) 
+ cmd.Env = p.Envs + cmd.Env = append(cmd.Env, fmt.Sprintf("FABRIC_LOGGING_SPEC=%s", "debug")) + + sess, err := helper.StartSession(cmd, "Install Chaincode") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) +} + +func Org1CA() *helper.CA { + cr := helper.Org1CACR(namespace, domain) + + return &helper.CA{ + Domain: domain, + Name: cr.Name, + Namespace: namespace, + WorkingDir: wd, + CR: cr, + CRClient: ibpCRClient, + KClient: kclient, + NativeResourcePoller: integration.NativeResourcePoller{ + Name: cr.Name, + Namespace: namespace, + Client: kclient, + }, + } +} + +func Org1Peer(tlsCert, caHost, adminCert string) *helper.Peer { + cr, err := helper.Org1PeerCR(namespace, domain, peerUsername, tlsCert, caHost, adminCert) + Expect(err).NotTo(HaveOccurred()) + + return &helper.Peer{ + Domain: domain, + Name: cr.Name, + Namespace: namespace, + WorkingDir: wd, + CR: cr, + CRClient: ibpCRClient, + KClient: kclient, + NativeResourcePoller: integration.NativeResourcePoller{ + Name: cr.Name, + Namespace: namespace, + Client: kclient, + }, + } +} + +func ClearOperatorConfig() { + err := kclient.CoreV1().ConfigMaps(namespace).Delete(context.TODO(), "operator-config", *metav1.NewDeleteOptions(0)) + if !k8serrors.IsNotFound(err) { + Expect(err).NotTo(HaveOccurred()) + } +} + +func GetBackup(certType, name string) *common.Backup { + backupSecret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), fmt.Sprintf("%s-crypto-backup", name), metav1.GetOptions{}) + if err != nil { + Expect(k8serrors.IsNotFound(err)).To(Equal(true)) + return &common.Backup{} + } + + backup := &common.Backup{} + key := fmt.Sprintf("%s-backup.json", certType) + err = json.Unmarshal(backupSecret.Data[key], backup) + Expect(err).NotTo(HaveOccurred()) + + return backup +} + +func TLSSignCert(name string) []byte { + secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), fmt.Sprintf("tls-%s-signcert", name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + return secret.Data["cert.pem"] +} + +func TLSKeystore(name string) []byte { + secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), fmt.Sprintf("tls-%s-keystore", name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + return secret.Data["key.pem"] +} + +func EcertSignCert(name string) []byte { + secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), fmt.Sprintf("ecert-%s-signcert", name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + return secret.Data["cert.pem"] +} + +func EcertKeystore(name string) []byte { + secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), fmt.Sprintf("ecert-%s-keystore", name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + return secret.Data["key.pem"] +} + +func EcertCACert(name string) []byte { + secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), fmt.Sprintf("ecert-%s-cacerts", name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + return secret.Data["cacert-0.pem"] +} diff --git a/integration/e2ev2/orderer_test.go b/integration/e2ev2/orderer_test.go new file mode 100644 index 00000000..1ea7ed53 --- /dev/null +++ b/integration/e2ev2/orderer_test.go @@ -0,0 +1,381 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package e2ev2_test + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "sigs.k8s.io/controller-runtime/pkg/client" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/integration" + "github.com/IBM-Blockchain/fabric-operator/integration/helper" + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/orderer/v1" + v2 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/orderer/v2" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + v2config "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer/config/v2" + baseorderer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" +) + +const ( + IBPORDERERS = "ibporderers" + + signCert = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNUekNDQWZXZ0F3SUJBZ0lVQWNnakVkOHBkOE43Vjg0YmFleG4yQzU0dWtzd0NnWUlLb1pJemowRUF3SXcKYURFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJrd0Z3WURWUVFERXhCbVlXSnlhV010ClkyRXRjMlZ5ZG1WeU1CNFhEVEU1TVRFeE1qRTRNell3TUZvWERUSTBNVEV4TURFNE5ERXdNRm93WHpFTE1Ba0cKQTFVRUJoTUNWVk14RnpBVkJnTlZCQWdURGs1dmNuUm9JRU5oY205c2FXNWhNUlF3RWdZRFZRUUtFd3RJZVhCbApjbXhsWkdkbGNqRVBNQTBHQTFVRUN4TUdZMnhwWlc1ME1SQXdEZ1lEVlFRREV3ZHZjbVJsY21WeU1Ga3dFd1lICktvWkl6ajBDQVFZSUtvWkl6ajBEQVFjRFFnQUU2NFJwK1pvVnYyaTg0cE5KUUFNUHJpenJmZVlNT2Y0UnZ1eHkKNHZOUU1Pd3JEemlIZkFLTnZmdUJlbDhpQ2dndHRXM2paZTVkSEFZaFVIS2Ryb3FodmFPQmhUQ0JnakFPQmdOVgpIUThCQWY4RUJBTUNCNEF3REFZRFZSMFRBUUgvQkFJd0FEQWRCZ05WSFE0RUZnUVVWakl3Y1YwYXRNZmZWV1E5CnhtenpXVG9uYmlJd0h3WURWUjBqQkJnd0ZvQVVTUU9ZL0Z5YnNXcTlIWEo3c296aUFyLzhtQkV3SWdZRFZSMFIKQkJzd0dZSVhVMkZoWkhNdFRXRmpRbTl2YXkxUWNtOHViRzlqWVd3d0NnWUlLb1pJemowRUF3SURTQUF3UlFJaApBUGE4Y3VjL3QvOW45ZDZlSHZoUWdialNBK1k2dytERW1ka2RpdnJHaGE5RUFpQXdTZStlVGdsQWJYQVNoTnhwCkJpR0Rjc2IwZ1pmRmhQd1pIN1VnQW1IQjN3PT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=" + certKey = "LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JR0hBZ0VBTUJNR0J5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEJHMHdhd0lCQVFRZ1p2VWRsUVZ6QlVSc3I2STMKZEVvd0ZlVGkvVkNLZVZqMmFwN2x3QWNYSzJLaFJBTkNBQVRyaEduNW1oVy9hTHppazBsQUF3K3VMT3Q5NWd3NQovaEcrN0hMaTgxQXc3Q3NQT0lkOEFvMjkrNEY2WHlJS0NDMjFiZU5sN2wwY0JpRlFjcDJ1aXFHOQotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg==" + caCert = 
"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNGakNDQWIyZ0F3SUJBZ0lVZi84bk94M2NqM1htVzNDSUo1L0Q1ejRRcUVvd0NnWUlLb1pJemowRUF3SXcKYURFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJrd0Z3WURWUVFERXhCbVlXSnlhV010ClkyRXRjMlZ5ZG1WeU1CNFhEVEU1TVRBek1ERTNNamd3TUZvWERUTTBNVEF5TmpFM01qZ3dNRm93YURFTE1Ba0cKQTFVRUJoTUNWVk14RnpBVkJnTlZCQWdURGs1dmNuUm9JRU5oY205c2FXNWhNUlF3RWdZRFZRUUtFd3RJZVhCbApjbXhsWkdkbGNqRVBNQTBHQTFVRUN4TUdSbUZpY21sak1Sa3dGd1lEVlFRREV4Qm1ZV0p5YVdNdFkyRXRjMlZ5CmRtVnlNRmt3RXdZSEtvWkl6ajBDQVFZSUtvWkl6ajBEQVFjRFFnQUVSbzNmbUc2UHkyUHd6cUMwNnFWZDlFOFgKZ044eldqZzFMb3lnMmsxdkQ4MXY1dENRRytCTVozSUJGQnI2VTRhc0tZTUREakd6TElERmdUUTRjVDd1VktORgpNRU13RGdZRFZSMFBBUUgvQkFRREFnRUdNQklHQTFVZEV3RUIvd1FJTUFZQkFmOENBUUV3SFFZRFZSME9CQllFCkZFa0RtUHhjbTdGcXZSMXllN0tNNGdLLy9KZ1JNQW9HQ0NxR1NNNDlCQU1DQTBjQU1FUUNJRC92QVFVSEh2SWwKQWZZLzM5UWdEU2ltTWpMZnhPTG44NllyR1EvWHpkQVpBaUFpUmlyZmlMdzVGbXBpRDhtYmlmRjV4bzdFUzdqNApaUWQyT0FUNCt5OWE0Zz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K" +) + +var _ = Describe("orderer", func() { + var ( + node1 helper.Orderer + ) + + BeforeEach(func() { + node1 = orderer.Nodes[0] + Eventually(node1.PodIsRunning, time.Second*60, time.Second*2).Should((Equal(true))) + + ClearOperatorConfig() + }) + + AfterEach(func() { + // Set flag if a test falls + if CurrentGinkgoTestDescription().Failed { + testFailed = true + } + }) + + Context("config overrides", func() { + var ( + podName string + bytes []byte + ) + + BeforeEach(func() { + cm, err := kclient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), orderer.Name+"node1-config", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + ordererBytes := cm.BinaryData["orderer.yaml"] + ordererConfig, err := v2config.ReadOrdererFromBytes(ordererBytes) + Expect(err).NotTo(HaveOccurred()) + Expect(ordererConfig.General.Keepalive.ServerMinInterval.Duration).To(Equal(common.MustParseDuration("30h").Duration)) + + configOverride := &v2config.Orderer{ + Orderer: v2.Orderer{ + General: v2.General{ + Keepalive: v1.Keepalive{ + ServerInterval: common.MustParseDuration("20h"), + }, + }, + }, + } + configBytes, err := json.Marshal(configOverride) + Expect(err).NotTo(HaveOccurred()) + orderer.CR.Spec.ConfigOverride = &runtime.RawExtension{Raw: configBytes} + + orderer.CR.Name = orderer.CR.Name + "node1" + + bytes, err = json.Marshal(orderer.CR) + Expect(err).NotTo(HaveOccurred()) + + podName = node1.GetRunningPods()[0].Name + }) + + It("updates config based on overrides", func() { + result := ibpCRClient.Patch(types.MergePatchType).Namespace(namespace).Resource(IBPORDERERS).Name(node1.Name).Body(bytes).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + + By("updating config in config map", func() { + var ordererConfig *v2config.Orderer + Eventually(func() bool { + cm, err := kclient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), orderer.Name+"node1-config", metav1.GetOptions{}) + if err != nil { + return false + } + + ordererBytes := cm.BinaryData["orderer.yaml"] + ordererConfig, err = v2config.ReadOrdererFromBytes(ordererBytes) + if err != nil { + return false + } + + if ordererConfig.General.Keepalive.ServerInterval.Duration == common.MustParseDuration("20h").Duration { + return true + } + + return false + }).Should(Equal(true)) + + Expect(ordererConfig.General.Keepalive.ServerMinInterval.Duration).To(Equal(common.MustParseDuration("30h").Duration)) + 
Expect(ordererConfig.General.Keepalive.ServerInterval.Duration).To(Equal(common.MustParseDuration("20h").Duration))
+			})
+
+			By("restarting orderer pods", func() {
+				Eventually(func() bool {
+					pods := node1.GetRunningPods()
+					if len(pods) == 0 {
+						return false
+					}
+
+					newPodName := pods[0].Name
+					if newPodName != podName {
+						return true
+					}
+
+					return false
+				}).Should(Equal(true))
+			})
+		})
+	})
+
+	Context("msp certs", func() {
+		var (
+			podName     string
+			oldsigncert []byte
+			oldkeystore []byte
+			oldcacert   []byte
+		)
+
+		BeforeEach(func() {
+			Eventually(func() int { return len(node1.GetRunningPods()) }).Should(Equal(1))
+
+			pods := node1.GetPods()
+			podName = pods[0].Name
+
+			// Store original certs
+			oldsigncert = EcertSignCert(node1.Name)
+			oldkeystore = EcertKeystore(node1.Name)
+			oldcacert = EcertCACert(node1.Name)
+		})
+
+		It("updates secrets for new certs passed through MSP spec", func() {
+
+			patch := func(i client.Object) {
+				testOrderer := i.(*current.IBPOrderer)
+				testOrderer.Spec.Secret = &current.SecretSpec{
+					MSP: &current.MSPSpec{
+						Component: &current.MSP{
+							SignCerts: signCert,
+							KeyStore:  certKey,
+							CACerts:   []string{caCert},
+						},
+					},
+				}
+			}
+
+			err := integration.ResilientPatch(ibpCRClient, node1.Name, namespace, "ibporderers", 3, &current.IBPOrderer{}, patch)
+			Expect(err).NotTo(HaveOccurred())
+
+			By("restarting node", func() {
+				Eventually(func() bool {
+					pods := node1.GetPods()
+					if len(pods) != 1 {
+						return false
+					}
+
+					newPodName := pods[0].Name
+					if newPodName == podName {
+						return false
+					}
+
+					return true
+				}).Should(Equal(true))
+
+				Eventually(node1.PodIsRunning).Should((Equal(true)))
+			})
+
+			By("backing up old signcert", func() {
+				backup := GetBackup("ecert", node1.Name)
+				Expect(len(backup.List)).NotTo(Equal(0))
+				Expect(backup.List[len(backup.List)-1].SignCerts).To(Equal(base64.StdEncoding.EncodeToString(oldsigncert)))
+				Expect(backup.List[len(backup.List)-1].KeyStore).To(Equal(base64.StdEncoding.EncodeToString(oldkeystore)))
+				Expect(backup.List[len(backup.List)-1].CACerts).To(Equal([]string{base64.StdEncoding.EncodeToString(oldcacert)}))
+			})
+
+			By("updating signcert secret", func() {
+				Expect(bytes.Equal(oldsigncert, EcertSignCert(node1.Name))).To(Equal(false))
+			})
+
+			By("updating keystore secret", func() {
+				Expect(bytes.Equal(oldkeystore, EcertKeystore(node1.Name))).To(Equal(false))
+			})
+
+			By("updating cacert secret", func() {
+				Expect(bytes.Equal(oldcacert, EcertCACert(node1.Name))).To(Equal(false))
+			})
+		})
+	})
+
+	Context("node ou updated", func() {
+		var (
+			podName    string
+			bytes      []byte
+			ibporderer *current.IBPOrderer
+			secret     *corev1.Secret
+		)
+
+		BeforeEach(func() {
+			// Pods seem to run slower and restart slower when running test in Travis.
+			SetDefaultEventuallyTimeout(540 * time.Second)
+
+			Eventually(func() int { return len(node1.GetRunningPods()) }).Should(Equal(1))
+			podName = node1.GetRunningPods()[0].Name
+
+			// Make sure config is in expected state
+			cm, err := kclient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), node1.Name+"-config", metav1.GetOptions{})
+			Expect(err).NotTo(HaveOccurred())
+
+			configBytes := cm.BinaryData["config.yaml"]
+			cfg, err := config.NodeOUConfigFromBytes(configBytes)
+			Expect(err).NotTo(HaveOccurred())
+			Expect(cfg.NodeOUs.Enable).To(Equal(true))
+
+			secret, err = kclient.CoreV1().
+				Secrets(namespace).
+ Get(context.TODO(), fmt.Sprintf("ecert-%s-signcert", node1.Name), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + result := ibpCRClient.Get().Namespace(namespace).Resource(IBPORDERERS).Name(node1.Name).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + + ibporderer = ¤t.IBPOrderer{} + result.Into(ibporderer) + }) + + It("disables nodeOU", func() { + By("providing admin certs", func() { + var err error + adminCert := base64.StdEncoding.EncodeToString(secret.Data["cert.pem"]) + + ibporderer.Spec.Secret.Enrollment.Component.AdminCerts = []string{adminCert} + ibporderer.Spec.Secret.MSP = nil + bytes, err = json.Marshal(ibporderer) + Expect(err).NotTo(HaveOccurred()) + + result := ibpCRClient.Put().Namespace(namespace).Resource(IBPORDERERS).Name(node1.Name).Body(bytes).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + + Eventually(func() bool { + _, err := kclient.CoreV1(). + Secrets(namespace). + Get(context.TODO(), fmt.Sprintf("ecert-%s-admincerts", node1.Name), metav1.GetOptions{}) + if err != nil { + return false + } + return true + }).Should(Equal(true)) + }) + + By("disabling nodeOU", func() { + result := ibpCRClient.Get().Namespace(namespace).Resource(IBPORDERERS).Name(node1.Name).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + + ibporderer = ¤t.IBPOrderer{} + result.Into(ibporderer) + + // Disable node ou + ibporderer.Spec.DisableNodeOU = ¤t.BoolTrue + bytes, err := json.Marshal(ibporderer) + Expect(err).NotTo(HaveOccurred()) + + result = ibpCRClient.Patch(types.MergePatchType).Namespace(namespace).Resource(IBPORDERERS).Name(node1.Name).Body(bytes).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + }) + + By("updating config map", func() { + Eventually(func() bool { + cm, err := kclient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), node1.Name+"-config", metav1.GetOptions{}) + if err != nil { + return false + } + + configBytes := cm.BinaryData["config.yaml"] + nodeOUConfig, err := config.NodeOUConfigFromBytes(configBytes) + if err != nil { + return false + } + + return nodeOUConfig.NodeOUs.Enable + }).Should(Equal(false)) + }) + + By("restarting orderer node pods", func() { + Eventually(func() bool { + pods := node1.GetRunningPods() + if len(pods) == 0 { + return false + } + + newPodName := pods[0].Name + if newPodName != podName { + return true + } + + return false + }).Should(Equal(true)) + }) + }) + }) +}) + +func GetOrderer(tlsCert, caHost string) *helper.Orderer { + cr, err := helper.OrdererCR(namespace, domain, ordererUsername, tlsCert, caHost) + Expect(err).NotTo(HaveOccurred()) + + nodes := []helper.Orderer{ + helper.Orderer{ + Name: cr.Name + "node1", + Namespace: namespace, + CR: cr.DeepCopy(), + NodeName: fmt.Sprintf("%s%s%d", cr.Name, baseorderer.NODE, 1), + NativeResourcePoller: integration.NativeResourcePoller{ + Name: cr.Name + "node1", + Namespace: namespace, + Client: kclient, + }, + }, + } + + nodes[0].CR.ObjectMeta.Name = cr.Name + "node1" + + return &helper.Orderer{ + Name: cr.Name, + Namespace: namespace, + CR: cr, + NodeName: fmt.Sprintf("%s-%s%d", cr.Name, baseorderer.NODE, 1), + NativeResourcePoller: integration.NativeResourcePoller{ + Name: cr.Name, + Namespace: namespace, + Client: kclient, + }, + Nodes: nodes, + } +} diff --git a/integration/e2ev2/peer_test.go b/integration/e2ev2/peer_test.go new file mode 100644 index 00000000..d76df336 --- /dev/null +++ b/integration/e2ev2/peer_test.go @@ -0,0 +1,204 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator 
project
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package e2ev2_test
+
+import (
+	"context"
+	"encoding/json"
+	"time"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+
+	current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1"
+	"github.com/IBM-Blockchain/fabric-operator/pkg/apis/common"
+	v2 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/peer/v2"
+	"github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config"
+	v2peerconfig "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer/config/v2"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+const (
+	IBPPEERS = "ibppeers"
+)
+
+var _ = Describe("peer", func() {
+	BeforeEach(func() {
+		Eventually(org1peer.PodIsRunning).Should((Equal(true)))
+
+		ClearOperatorConfig()
+	})
+
+	AfterEach(func() {
+		// Set flag if a test fails
+		if CurrentGinkgoTestDescription().Failed {
+			testFailed = true
+		}
+	})
+
+	Context("config overrides", func() {
+		var (
+			bytes []byte
+		)
+
+		BeforeEach(func() {
+			// Make sure the config is in expected state
+			cm, err := kclient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), org1peer.Name+"-config", metav1.GetOptions{})
+			Expect(err).NotTo(HaveOccurred())
+
+			coreBytes := cm.BinaryData["core.yaml"]
+			peerConfig, err := v2peerconfig.ReadCoreFromBytes(coreBytes)
+			Expect(err).NotTo(HaveOccurred())
+			Expect(peerConfig.Peer.ID).To(Equal("testPeerID"))
+
+			// Update the config overrides
+			result := ibpCRClient.Get().Namespace(namespace).Resource(IBPPEERS).Name(org1peer.Name).Do(context.TODO())
+			Expect(result.Error()).NotTo(HaveOccurred())
+
+			peer := &current.IBPPeer{}
+			result.Into(peer)
+
+			configOverride := &v2peerconfig.Core{
+				Core: v2.Core{
+					Peer: v2.Peer{
+						Keepalive: v2.KeepAlive{
+							MinInterval: common.MustParseDuration("20h"),
+						},
+					},
+				},
+			}
+
+			configBytes, err := json.Marshal(configOverride)
+			Expect(err).NotTo(HaveOccurred())
+			peer.Spec.ConfigOverride = &runtime.RawExtension{Raw: configBytes}
+
+			bytes, err = json.Marshal(peer)
+			Expect(err).NotTo(HaveOccurred())
+		})
+
+		It("updates config based on overrides", func() {
+			result := ibpCRClient.Patch(types.MergePatchType).Namespace(namespace).Resource(IBPPEERS).Name(org1peer.Name).Body(bytes).Do(context.TODO())
+			Expect(result.Error()).NotTo(HaveOccurred())
+
+			var peerConfig *v2peerconfig.Core
+			Eventually(func() bool {
+				cm, err := kclient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), org1peer.Name+"-config", metav1.GetOptions{})
+				if err != nil {
+					return false
+				}
+
+				coreBytes := cm.BinaryData["core.yaml"]
+				peerConfig, err = v2peerconfig.ReadCoreFromBytes(coreBytes)
+				if err != nil {
+					return false
+				}
+
+				if peerConfig.Peer.Keepalive.MinInterval.Duration == common.MustParseDuration("20h").Duration {
+					return true
+				}
+
+				return false
+			}).Should(Equal(true))
+
+			Expect(peerConfig.Peer.ID).To(Equal("testPeerID"))
+
Expect(peerConfig.Peer.Keepalive.MinInterval.Duration).To(Equal(common.MustParseDuration("20h").Duration)) + }) + }) + + Context("node ou updated", func() { + var ( + podName string + bytes []byte + ) + + BeforeEach(func() { + // Pods seem to run slower and restart slower when running test in Travis. + SetDefaultEventuallyTimeout(540 * time.Second) + + Eventually(func() int { return len(org1peer.GetRunningPods()) }).Should(Equal(1)) + podName = org1peer.GetRunningPods()[0].Name + + // Make sure config is in expected state + cm, err := kclient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), org1peer.Name+"-config", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + configBytes := cm.BinaryData["config.yaml"] + cfg, err := config.NodeOUConfigFromBytes(configBytes) + Expect(err).NotTo(HaveOccurred()) + Expect(cfg.NodeOUs.Enable).To(Equal(true)) + + // Update the config overrides + result := ibpCRClient.Get().Namespace(namespace).Resource(IBPPEERS).Name(org1peer.Name).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + + peer := ¤t.IBPPeer{} + result.Into(peer) + + // Disable node ou + peer.Spec.DisableNodeOU = ¤t.BoolTrue + bytes, err = json.Marshal(peer) + Expect(err).NotTo(HaveOccurred()) + }) + + It("disables nodeOU", func() { + result := ibpCRClient.Patch(types.MergePatchType).Namespace(namespace).Resource(IBPPEERS).Name(org1peer.Name).Body(bytes).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + + peer := ¤t.IBPPeer{} + result.Into(peer) + Expect(peer.Spec.NodeOUDisabled()).To(Equal(true)) + + By("updating config map", func() { + Eventually(func() bool { + cm, err := kclient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), org1peer.Name+"-config", metav1.GetOptions{}) + if err != nil { + return false + } + + configBytes := cm.BinaryData["config.yaml"] + nodeOUConfig, err := config.NodeOUConfigFromBytes(configBytes) + if err != nil { + return false + } + + return nodeOUConfig.NodeOUs.Enable + }).Should(Equal(false)) + }) + + By("restarting peer pods", func() { + Eventually(func() bool { + pods := org1peer.GetRunningPods() + if len(pods) == 0 { + return false + } + + newPodName := pods[0].Name + if newPodName != podName { + return true + } + + return false + }).Should(Equal(true)) + }) + }) + }) +}) diff --git a/integration/helper/ca.go b/integration/helper/ca.go new file mode 100644 index 00000000..94b8c146 --- /dev/null +++ b/integration/helper/ca.go @@ -0,0 +1,184 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package helper + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net/http" + "os" + "os/exec" + "path/filepath" + "strings" + "time" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/integration" + ibpclient "github.com/IBM-Blockchain/fabric-operator/pkg/client" + + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + + "sigs.k8s.io/yaml" +) + +func CreateCA(crClient *ibpclient.IBPClient, ca *current.IBPCA) error { + result := crClient.Post().Namespace(ca.Namespace).Resource("ibpcas").Body(ca).Do(context.TODO()) + err := result.Error() + if !k8serrors.IsAlreadyExists(err) { + return err + } + return nil +} + +type CA struct { + Domain string + Name string + Namespace string + WorkingDir string + + CR *current.IBPCA + CRClient *ibpclient.IBPClient + KClient *kubernetes.Clientset + + integration.NativeResourcePoller +} + +func (ca *CA) PollForCRStatus() current.IBPCRStatusType { + crStatus := ¤t.IBPCA{} + + result := ca.CRClient.Get().Namespace(ca.Namespace).Resource("ibpcas").Name(ca.Name).Do(context.TODO()) + // Not handling this because - integration test + _ = result.Into(crStatus) + + return crStatus.Status.Type +} + +func (ca *CA) HealthCheck(url string, cert []byte) bool { + rootCertPool := x509.NewCertPool() + rootCertPool.AppendCertsFromPEM(cert) + + transport := http.DefaultTransport + transport.(*http.Transport).TLSClientConfig = &tls.Config{ + RootCAs: rootCertPool, + MinVersion: tls.VersionTLS12, // TLS 1.2 recommended, TLS 1.3 (current latest version) encouraged + } + + client := http.Client{ + Transport: transport, + Timeout: 30 * time.Second, + } + + _, err := client.Get(url) + if err != nil { + return false + } + + return true +} + +func (ca *CA) ConnectionProfile() (*current.CAConnectionProfile, error) { + cm, err := ca.KClient.CoreV1().ConfigMaps(ca.Namespace).Get(context.TODO(), fmt.Sprintf("%s-connection-profile", ca.CR.Name), metav1.GetOptions{}) + if err != nil { + return nil, err + } + + data := cm.BinaryData["profile.json"] + + profile := ¤t.CAConnectionProfile{} + err = yaml.Unmarshal(data, profile) + if err != nil { + return nil, err + } + + return profile, nil +} + +func (ca *CA) Address() string { + return fmt.Sprintf("%s-%s-ca.%s", ca.Namespace, ca.Name, ca.Domain) +} + +func (ca *CA) Register(name string, secret string, userType string) *exec.Cmd { + url := fmt.Sprintf("https://%s", ca.Address()) + args := []string{ + "--tls.certfiles", ca.TLSPath(), + "--id.name", name, + "--id.secret", secret, + "--id.type", userType, + "-u", url, + "-d", + } + return GetCommand(filepath.Join(ca.WorkingDir, "bin/fabric-ca-client register"), args...) +} + +func (ca *CA) Enroll(name string, secret string) *exec.Cmd { + url := fmt.Sprintf("https://%s:%s@%s", name, secret, ca.Address()) + args := []string{ + "--tls.certfiles", ca.TLSPath(), + "-u", url, + "-d", + } + return GetCommand(filepath.Join(ca.WorkingDir, "bin/fabric-ca-client enroll"), args...) +} + +func (ca *CA) DeleteIdentity(name string) *exec.Cmd { + url := fmt.Sprintf("https://%s", ca.Address()) + args := []string{ + name, + "--tls.certfiles", ca.TLSPath(), + "-u", url, + "-d", + } + return GetCommand(filepath.Join(ca.WorkingDir, "bin/fabric-ca-client identity remove"), args...) 
+} + +func (ca *CA) TLSToFile(cert []byte) error { + err := os.MkdirAll(filepath.Dir(ca.TLSPath()), 0750) + if err != nil { + return err + } + err = ioutil.WriteFile(ca.TLSPath(), cert, 0600) + if err != nil { + return err + } + return nil +} + +func (ca *CA) TLSPath() string { + return filepath.Join(ca.WorkingDir, ca.Name, "tls-cert.pem") +} + +func (ca *CA) JobWithPrefixFound(prefix, namespace string) bool { + jobs, err := ca.KClient.BatchV1().Jobs(namespace).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return false + } + + for _, job := range jobs.Items { + if strings.HasPrefix(job.GetName(), prefix) { + return true + } + } + + return false +} diff --git a/integration/helper/crspecs.go b/integration/helper/crspecs.go new file mode 100644 index 00000000..cf8a3b72 --- /dev/null +++ b/integration/helper/crspecs.go @@ -0,0 +1,298 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package helper + +import ( + "encoding/json" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/integration" + commonapi "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + v1orderer "github.com/IBM-Blockchain/fabric-operator/pkg/apis/orderer/v1" + v2orderer "github.com/IBM-Blockchain/fabric-operator/pkg/apis/orderer/v2" + v2peer "github.com/IBM-Blockchain/fabric-operator/pkg/apis/peer/v2" + v2ordererconfig "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer/config/v2" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +func Org1CACR(namespace, domain string) *current.IBPCA { + return ¤t.IBPCA{ + ObjectMeta: metav1.ObjectMeta{ + Name: "org1ca", + Namespace: namespace, + }, + Spec: current.IBPCASpec{ + License: current.License{ + Accept: true, + }, + ImagePullSecrets: []string{"regcred"}, + Images: ¤t.CAImages{ + CAImage: integration.CaImage, + CATag: integration.CaTag, + CAInitImage: integration.InitImage, + CAInitTag: integration.InitTag, + }, + Resources: ¤t.CAResources{ + CA: &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("50m"), + corev1.ResourceMemory: resource.MustParse("100M"), + corev1.ResourceEphemeralStorage: resource.MustParse("100M"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("50m"), + corev1.ResourceMemory: resource.MustParse("100M"), + corev1.ResourceEphemeralStorage: resource.MustParse("1G"), + }, + }, + }, + Zone: "select", + Region: "select", + Domain: domain, + FabricVersion: integration.FabricVersion + "-1", + }, + } +} + +func Org1PeerCR(namespace, domain, peerUsername, tlsCert, caHost, adminCert string) (*current.IBPPeer, error) { + resourceReq := &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + 
corev1.ResourceMemory: resource.MustParse("200M"),
+			corev1.ResourceEphemeralStorage: resource.MustParse("100M"),
+		},
+		Limits: corev1.ResourceList{
+			corev1.ResourceCPU: resource.MustParse("100m"),
+			corev1.ResourceMemory: resource.MustParse("200M"),
+			corev1.ResourceEphemeralStorage: resource.MustParse("1G"),
+		},
+	}
+
+	configOverride := v2peer.Core{
+		Peer: v2peer.Peer{
+			ID: "testPeerID",
+		},
+	}
+	configBytes, err := json.Marshal(configOverride)
+	if err != nil {
+		return nil, err
+	}
+
+	cr := &current.IBPPeer{
+		TypeMeta: metav1.TypeMeta{
+			Kind: "IBPPeer",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "org1peer",
+			Namespace: namespace,
+		},
+		Spec: current.IBPPeerSpec{
+			License: current.License{
+				Accept: true,
+			},
+			MSPID: "Org1MSP",
+			Region: "select",
+			Zone: "select",
+			ImagePullSecrets: []string{"regcred"},
+			Images: &current.PeerImages{
+				CouchDBImage: integration.CouchdbImage,
+				CouchDBTag: integration.CouchdbTag,
+				GRPCWebImage: integration.GrpcwebImage,
+				GRPCWebTag: integration.GrpcwebTag,
+				PeerImage: integration.PeerImage,
+				PeerTag: integration.PeerTag,
+				PeerInitImage: integration.InitImage,
+				PeerInitTag: integration.InitTag,
+			},
+			Domain: domain,
+			Resources: &current.PeerResources{
+				DinD: &corev1.ResourceRequirements{
+					Requests: corev1.ResourceList{
+						corev1.ResourceCPU: resource.MustParse("500m"),
+						corev1.ResourceMemory: resource.MustParse("1G"),
+						corev1.ResourceEphemeralStorage: resource.MustParse("100M"),
+					},
+					Limits: corev1.ResourceList{
+						corev1.ResourceCPU: resource.MustParse("500m"),
+						corev1.ResourceMemory: resource.MustParse("1G"),
+						corev1.ResourceEphemeralStorage: resource.MustParse("1G"),
+					},
+				},
+				CouchDB: &corev1.ResourceRequirements{
+					Requests: corev1.ResourceList{
+						corev1.ResourceCPU: resource.MustParse("200m"),
+						corev1.ResourceMemory: resource.MustParse("400M"),
+						corev1.ResourceEphemeralStorage: resource.MustParse("100M"),
+					},
+					Limits: corev1.ResourceList{
+						corev1.ResourceCPU: resource.MustParse("200m"),
+						corev1.ResourceMemory: resource.MustParse("400M"),
+						corev1.ResourceEphemeralStorage: resource.MustParse("1G"),
+					},
+				},
+				FluentD: resourceReq,
+				GRPCProxy: resourceReq,
+				Peer: resourceReq,
+			},
+			Storage: &current.PeerStorages{
+				Peer: &current.StorageSpec{
+					Size: "150Mi",
+				},
+				StateDB: &current.StorageSpec{
+					Size: "1Gi",
+				},
+			},
+			Ingress: current.Ingress{
+				TlsSecretName: "tlssecret",
+			},
+			Secret: &current.SecretSpec{
+				Enrollment: &current.EnrollmentSpec{
+					Component: &current.Enrollment{
+						CAHost: caHost,
+						CAPort: "443",
+						CAName: "ca",
+						CATLS: &current.CATLS{
+							CACert: tlsCert,
+						},
+						EnrollID: peerUsername,
+						EnrollSecret: "peerpw",
+						AdminCerts: []string{adminCert, adminCert},
+					},
+					TLS: &current.Enrollment{
+						CAHost: caHost,
+						CAPort: "443",
+						CAName: "tlsca",
+						CATLS: &current.CATLS{
+							CACert: tlsCert,
+						},
+						EnrollID: peerUsername,
+						EnrollSecret: "peerpw",
+					},
+				},
+			},
+			ConfigOverride: &runtime.RawExtension{Raw: configBytes},
+			FabricVersion: integration.FabricVersion + "-1",
+		},
+	}
+
+	return cr, nil
+}
+
+func OrdererCR(namespace, domain, ordererUsername, tlsCert, caHost string) (*current.IBPOrderer, error) {
+	resourceReq := &corev1.ResourceRequirements{
+		Requests: corev1.ResourceList{
+			corev1.ResourceCPU: resource.MustParse("200m"),
+			corev1.ResourceMemory: resource.MustParse("400M"),
+			corev1.ResourceEphemeralStorage: resource.MustParse("100M"),
+		},
+		Limits: corev1.ResourceList{
+			corev1.ResourceCPU: resource.MustParse("200m"),
+			corev1.ResourceMemory: resource.MustParse("400M"),
+			corev1.ResourceEphemeralStorage: resource.MustParse("1G"),
+		},
+	}
+
+
configOverride := v2ordererconfig.Orderer{
+		Orderer: v2orderer.Orderer{
+			General: v2orderer.General{
+				Keepalive: v1orderer.Keepalive{
+					ServerMinInterval: commonapi.MustParseDuration("30h"),
+				},
+			},
+		},
+	}
+
+	configBytes, err := json.Marshal(configOverride)
+	if err != nil {
+		return nil, err
+	}
+
+	enrollment := &current.EnrollmentSpec{
+		Component: &current.Enrollment{
+			CAHost: caHost,
+			CAPort: "443",
+			CAName: "ca",
+			CATLS: &current.CATLS{
+				CACert: tlsCert,
+			},
+			EnrollID: ordererUsername,
+			EnrollSecret: "ordererpw",
+		},
+		TLS: &current.Enrollment{
+			CAHost: caHost,
+			CAPort: "443",
+			CAName: "tlsca",
+			CATLS: &current.CATLS{
+				CACert: tlsCert,
+			},
+			EnrollID: ordererUsername,
+			EnrollSecret: "ordererpw",
+		},
+	}
+
+	cr := &current.IBPOrderer{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "ibporderer1",
+			Namespace: namespace,
+		},
+		Spec: current.IBPOrdererSpec{
+			License: current.License{
+				Accept: true,
+			},
+			ClusterSize: 3,
+			OrdererType: "etcdraft",
+			SystemChannelName: "testchainid",
+			OrgName: "orderermsp",
+			MSPID: "orderermsp",
+			ImagePullSecrets: []string{"regcred"},
+			GenesisProfile: "Initial",
+			Domain: domain,
+			Images: &current.OrdererImages{
+				GRPCWebImage: integration.GrpcwebImage,
+				GRPCWebTag: integration.GrpcwebTag,
+				OrdererImage: integration.OrdererImage,
+				OrdererTag: integration.OrdererTag,
+				OrdererInitImage: integration.InitImage,
+				OrdererInitTag: integration.InitTag,
+			},
+			ClusterSecret: []*current.SecretSpec{
+				&current.SecretSpec{
+					Enrollment: enrollment,
+				},
+				&current.SecretSpec{
+					Enrollment: enrollment,
+				},
+				&current.SecretSpec{
+					Enrollment: enrollment,
+				},
+			},
+			Resources: &current.OrdererResources{
+				Orderer: resourceReq,
+			},
+			FabricVersion: integration.FabricVersion + "-1",
+			ConfigOverride: &runtime.RawExtension{Raw: configBytes},
+		},
+	}
+
+	return cr, nil
+}
diff --git a/integration/helper/job.go b/integration/helper/job.go
new file mode 100644
index 00000000..4b1099e1
--- /dev/null
+++ b/integration/helper/job.go
@@ -0,0 +1,44 @@
+/*
+ * Copyright contributors to the Hyperledger Fabric Operator project
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package helper
+
+import (
+	"context"
+	"strings"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+)
+
+// GetJobID returns the full name of the job whose name contains the given base name.
+// The operator appends a unique ID to each job's name, so the complete name is not
+// known up front.
+func GetJobID(kclient *kubernetes.Clientset, namespace, name string) (string, error) {
+	jobs, err := kclient.BatchV1().Jobs(namespace).List(context.TODO(), metav1.ListOptions{})
+	if err != nil {
+		return "", err
+	}
+
+	for _, j := range jobs.Items {
+		if strings.Contains(j.Name, name) {
+			return j.Name, nil
+		}
+	}
+
+	// No matching job found
+	return "", err
+}
diff --git a/integration/helper/orderer.go b/integration/helper/orderer.go
new file mode 100644
index 00000000..ef21a8a2
--- /dev/null
+++ b/integration/helper/orderer.go
@@ -0,0 +1,91 @@
+/*
+ * Copyright contributors to the Hyperledger Fabric Operator project
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package helper
+
+import (
+	"context"
+	"strings"
+
+	current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1"
+	"github.com/IBM-Blockchain/fabric-operator/integration"
+	ibpclient "github.com/IBM-Blockchain/fabric-operator/pkg/client"
+
+	k8serrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+)
+
+func CreateOrderer(crClient *ibpclient.IBPClient, orderer *current.IBPOrderer) error {
+	result := crClient.Post().Namespace(orderer.Namespace).Resource("ibporderers").Body(orderer).Do(context.TODO())
+	err := result.Error()
+	if !k8serrors.IsAlreadyExists(err) {
+		return err
+	}
+	return nil
+}
+
+type Orderer struct {
+	Domain string
+	Name string
+	Namespace string
+	NodeName string
+	Nodes []Orderer
+	WorkingDir string
+
+	CR *current.IBPOrderer
+	CRClient *ibpclient.IBPClient
+	KClient *kubernetes.Clientset
+
+	integration.NativeResourcePoller
}
+
+func (o *Orderer) PollForParentCRStatus() current.IBPCRStatusType {
+	crStatus := &current.IBPOrderer{}
+
+	result := o.CRClient.Get().Namespace(o.Namespace).Resource("ibporderers").Name(o.Name).Do(context.TODO())
+	// Error intentionally ignored; this is integration test code
+	_ = result.Into(crStatus)
+
+	return crStatus.Status.Type
+}
+
+func (o *Orderer) PollForCRStatus() current.IBPCRStatusType {
+	crStatus := &current.IBPOrderer{}
+
+	result := o.CRClient.Get().Namespace(o.Namespace).Resource("ibporderers").Name(o.NodeName).Do(context.TODO())
+	// Error intentionally ignored; this is integration test code
+	_ = result.Into(crStatus)
+
+	return crStatus.Status.Type
+}
+
+func (o *Orderer) JobWithPrefixFound(prefix, namespace string) bool {
+	jobs, err := o.KClient.BatchV1().Jobs(namespace).List(context.TODO(), metav1.ListOptions{})
+	if err != nil {
+		return false
+	}
+
+	for _, job := range jobs.Items {
+		if strings.HasPrefix(job.GetName(), prefix) {
+			return true
+		}
+	}
+
+	return false
+}
diff --git a/integration/helper/peer.go b/integration/helper/peer.go
new file mode 100644
index 00000000..9134efb6
--- /dev/null
+++
b/integration/helper/peer.go @@ -0,0 +1,114 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package helper + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/integration" + ibpclient "github.com/IBM-Blockchain/fabric-operator/pkg/client" + + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + + "sigs.k8s.io/yaml" +) + +func CreatePeer(crClient *ibpclient.IBPClient, peer *current.IBPPeer) error { + result := crClient.Post().Namespace(peer.Namespace).Resource("ibppeers").Body(peer).Do(context.TODO()) + err := result.Error() + if !k8serrors.IsAlreadyExists(err) { + return err + } + return nil +} + +type Peer struct { + Domain string + Name string + Namespace string + WorkingDir string + + CR *current.IBPPeer + CRClient *ibpclient.IBPClient + KClient *kubernetes.Clientset + + integration.NativeResourcePoller +} + +func (p *Peer) PollForCRStatus() current.IBPCRStatusType { + crStatus := ¤t.IBPPeer{} + + result := p.CRClient.Get().Namespace(p.Namespace).Resource("ibppeers").Name(p.Name).Do(context.TODO()) + // Not handling this as this is integration test + _ = result.Into(crStatus) + + return crStatus.Status.Type +} + +func (p *Peer) TLSToFile(cert []byte) error { + err := os.MkdirAll(filepath.Dir(p.TLSPath()), 0750) + if err != nil { + return err + } + return ioutil.WriteFile(p.TLSPath(), cert, 0600) +} + +func (p *Peer) TLSPath() string { + return filepath.Join(p.WorkingDir, p.Name, "tls-cert.pem") +} + +func (p *Peer) ConnectionProfile() (*current.CAConnectionProfile, error) { + cm, err := p.KClient.CoreV1().ConfigMaps(p.Namespace).Get(context.TODO(), fmt.Sprintf("%s-connection-profile", p.CR.Name), metav1.GetOptions{}) + if err != nil { + return nil, err + } + + data := cm.BinaryData["profile.json"] + + profile := ¤t.CAConnectionProfile{} + err = yaml.Unmarshal(data, profile) + if err != nil { + return nil, err + } + + return profile, nil +} + +func (p *Peer) JobWithPrefixFound(prefix, namespace string) bool { + jobs, err := p.KClient.BatchV1().Jobs(namespace).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return false + } + + for _, job := range jobs.Items { + if strings.HasPrefix(job.GetName(), prefix) { + return true + } + } + + return false +} diff --git a/integration/helper/session.go b/integration/helper/session.go new file mode 100644 index 00000000..7ebc8d1e --- /dev/null +++ b/integration/helper/session.go @@ -0,0 +1,81 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package helper + +import ( + "fmt" + "os/exec" + "path/filepath" + "strings" + + "github.com/onsi/ginkgo" + "github.com/onsi/gomega/gexec" +) + +var ( + colorIndex uint +) + +func AbsPath(wd string, script string) string { + return filepath.Join(wd, script) +} + +func GetCommand(command string, args ...string) *exec.Cmd { + for _, arg := range args { + command = command + " " + arg + } + // Ignoring this gosec issue as this is integration test code + return exec.Command("bash", "-c", command) // #nosec +} + +// StartSession executes a command session. This should be used to launch +// command line tools that are expected to run to completion. +func StartSession(cmd *exec.Cmd, name string) (*gexec.Session, error) { + ansiColorCode := nextColor() + fmt.Fprintf( + ginkgo.GinkgoWriter, + "\x1b[33m[d]\x1b[%s[%s]\x1b[0m starting %s %s with env var: %s\n", + ansiColorCode, + name, + filepath.Base(cmd.Args[0]), + strings.Join(cmd.Args[1:], " "), + cmd.Env, + ) + return gexec.Start( + cmd, + gexec.NewPrefixedWriter( + fmt.Sprintf("\x1b[32m[o]\x1b[%s[%s]\x1b[0m ", ansiColorCode, name), + ginkgo.GinkgoWriter, + ), + gexec.NewPrefixedWriter( + fmt.Sprintf("\x1b[91m[e]\x1b[%s[%s]\x1b[0m ", ansiColorCode, name), + ginkgo.GinkgoWriter, + ), + ) +} + +func nextColor() string { + color := colorIndex%14 + 31 + if color > 37 { + color = color + 90 - 37 + } + + colorIndex++ + return fmt.Sprintf("%dm", color) +} diff --git a/integration/images.go b/integration/images.go new file mode 100644 index 00000000..e0ff6a83 --- /dev/null +++ b/integration/images.go @@ -0,0 +1,45 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package integration + +const ( + FabricCAVersion = "1.5.3" + FabricVersion = "2.2.5" + FabricVersion24 = "2.4.3" + InitImage = "registry.access.redhat.com/ubi8/ubi-minimal" + InitTag = "latest" + CaImage = "hyperledger/fabric-ca" + CaTag = FabricCAVersion + PeerImage = "hyperledger/fabric-peer" + PeerTag = FabricVersion24 + OrdererImage = "hyperledger/fabric-orderer" + OrdererTag = FabricVersion24 + Orderer14Tag = "1.4.12" + Orderer24Tag = FabricVersion24 + ConfigtxlatorImage = "hyperledger/fabric-tools" + ConfigtxlatorTag = FabricVersion24 + CouchdbImage = "couchdb" + CouchdbTag = "3.2.1" + GrpcwebImage = "ghcr.io/hyperledger-labs/grpc-web" + GrpcwebTag = "latest" + ConsoleImage = "ghcr.io/hyperledger-labs/fabric-console" + ConsoleTag = "latest" + DeployerImage = "ghcr.io/ibm-blockchain/fabric-deployer" + DeployerTag = "latest-amd64" +) diff --git a/integration/init/init_suite_test.go b/integration/init/init_suite_test.go new file mode 100644 index 00000000..70c10775 --- /dev/null +++ b/integration/init/init_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package init_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestInit(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Init Suite") +} diff --git a/integration/init/init_test.go b/integration/init/init_test.go new file mode 100644 index 00000000..deaad1ad --- /dev/null +++ b/integration/init/init_test.go @@ -0,0 +1,186 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package init + +import ( + "context" + "os" + + apis "github.com/IBM-Blockchain/fabric-operator/api" + "github.com/IBM-Blockchain/fabric-operator/pkg/global" + "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + cfconfig "github.com/cloudflare/cfssl/config" + "github.com/hyperledger/fabric-ca/lib" + "github.com/hyperledger/fabric-ca/lib/tls" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/client/config" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/manager/signals" +) + +const ( + rootDir = "root-home" + interDir = "inter-home" + + tlsCertFile = "../../../testdata/init/peer/tls-cert.pem" + tlsKeyFile = "../../../testdata/init/peer/tls-key.pem" +) + +var ( + root *lib.Server + inter *lib.Server + client controllerclient.Client + scheme *runtime.Scheme + namespace string + + kclient *kubernetes.Clientset +) + +var _ = BeforeSuite(func() { + cfg, err := config.GetConfig() + Expect(err).NotTo(HaveOccurred()) + + namespace = os.Getenv("OPERATOR_NAMESPACE") + if namespace == "" { + namespace = "operator-test" + } + mgr, err := manager.New(cfg, manager.Options{ + Namespace: namespace, + }) + Expect(err).NotTo(HaveOccurred()) + + err = apis.AddToScheme(mgr.GetScheme()) + Expect(err).NotTo(HaveOccurred()) + go mgr.Start(signals.SetupSignalHandler()) + + client = controllerclient.New(mgr.GetClient(), &global.ConfigSetter{}) + scheme = mgr.GetScheme() + + kclient, err = kubernetes.NewForConfig(cfg) + Expect(err).NotTo(HaveOccurred()) + + ns := &corev1.Namespace{} + ns.Name = namespace + err = client.Create(context.TODO(), ns) + Expect(err).NotTo(HaveOccurred()) + + // Setup root server + root = SetupServer(rootDir, "", 7054, nil) + err = root.Start() + Expect(err).NotTo(HaveOccurred()) + + // Setup intermediate server + tlsConfig := &tls.ServerTLSConfig{ + Enabled: true, + CertFile: tlsCertFile, + KeyFile: tlsKeyFile, + } + inter = SetupServer(interDir, "http://admin:adminpw@localhost:7054", 7055, tlsConfig) + err = inter.Start() + Expect(err).NotTo(HaveOccurred()) +}) + +var _ = AfterSuite(func() { + err := root.Stop() + Expect(err).NotTo(HaveOccurred()) + + err = inter.Stop() + Expect(err).NotTo(HaveOccurred()) + + err = os.RemoveAll(rootDir) + Expect(err).NotTo(HaveOccurred()) + + err = os.RemoveAll(interDir) + Expect(err).NotTo(HaveOccurred()) + + ns := &corev1.Namespace{} + ns.Name = namespace + err = client.Delete(context.TODO(), ns) + Expect(err).NotTo(HaveOccurred()) +}) + +func SetupServer(homeDir string, parentURL string, port int, tlsConfig *tls.ServerTLSConfig) *lib.Server { + affiliations := map[string]interface{}{ + "hyperledger": map[string]interface{}{ + "fabric": []string{"ledger", "orderer", "security"}, + "fabric-ca": nil, + "sdk": nil, + }, + "org2": []string{"dept1"}, + "org1": nil, + "org2dept1": nil, + } + profiles := map[string]*cfconfig.SigningProfile{ + "tls": &cfconfig.SigningProfile{ + Usage: []string{"signing", "key encipherment", "server auth", "client auth", "key agreement"}, + ExpiryString: "8760h", + }, + "ca": &cfconfig.SigningProfile{ + Usage: []string{"cert sign", "crl sign"}, + ExpiryString: "8760h", + CAConstraint: cfconfig.CAConstraint{ + IsCA: true, + MaxPathLen: 0, + }, + }, + } + defaultProfile := &cfconfig.SigningProfile{ + Usage: []string{"cert sign"}, + ExpiryString: "8760h", + } + srv := &lib.Server{ + Config: &lib.ServerConfig{ + Port: port, + Debug: true, + }, + CA: lib.CA{ + Config: &lib.CAConfig{ + Intermediate: lib.IntermediateCA{ + ParentServer: lib.ParentServer{ + URL: parentURL, + }, + }, + Affiliations: affiliations, + Registry: lib.CAConfigRegistry{ + MaxEnrollments: -1, + }, + Signing: &cfconfig.Signing{ + Profiles: profiles, + Default: defaultProfile, + }, + Version: "1.1.0", // The default test server/ca should use the latest 
version + }, + }, + HomeDir: homeDir, + } + + if tlsConfig != nil { + srv.Config.TLS = *tlsConfig + } + // The bootstrap user's affiliation is the empty string, which + // means the user is at the affiliation root + err := srv.RegisterBootstrapUser("admin", "adminpw", "") + Expect(err).NotTo(HaveOccurred()) + + return srv +} diff --git a/integration/init/orderer_test.go b/integration/init/orderer_test.go new file mode 100644 index 00000000..180f6abd --- /dev/null +++ b/integration/init/orderer_test.go @@ -0,0 +1,310 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package init + +import ( + "context" + "os" + "path/filepath" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/secretmanager" + initializer "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer" + ordererconfig "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer/config/v1" + baseorderer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/peer/mocks" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + testDirOrderer = "orderer-init-test" +) + +var _ = Describe("Orderer init", func() { + var ( + err error + ordererInit *initializer.Initializer + instance *current.IBPOrderer + orderer *baseorderer.Node + ) + + BeforeEach(func() { + ordererInit = &initializer.Initializer{ + Config: &initializer.Config{ + OUFile: filepath.Join(defaultConfigs, "orderer/ouconfig.yaml"), + }, + Client: client, + Scheme: scheme, + } + ordererInit.SecretManager = secretmanager.New(client, scheme, ordererInit.GetLabels) + + orderer = &baseorderer.Node{ + Client: client, + Initializer: ordererInit, + DeploymentManager: &mocks.DeploymentManager{}, + } + + }) + + Context("msp spec", func() { + var ( + msp *current.MSP + ) + + BeforeEach(func() { + msp = ¤t.MSP{ + KeyStore: "LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JR0hBZ0VBTUJNR0J5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEJHMHdhd0lCQVFRZ3hRUXdSVFFpVUcwREo1UHoKQTJSclhIUEtCelkxMkxRa0MvbVlveWo1bEhDaFJBTkNBQVN5bE1YLzFqdDlmUGt1RTZ0anpvSTlQbGt4LzZuVQpCMHIvMU56TTdrYnBjUk8zQ3RIeXQ2TXlQR21FOUZUN29pYXphU3J1TW9JTDM0VGdBdUpIOU9ZWQotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg==", + SignCerts: testcert, + AdminCerts: []string{testcert}, + CACerts: []string{testcert}, + IntermediateCerts: []string{testcert}, + } + + instance = ¤t.IBPOrderer{ + Spec: current.IBPOrdererSpec{ + Secret: ¤t.SecretSpec{ + MSP: ¤t.MSPSpec{ + Component: msp, + TLS: msp, + }, + }, + DisableNodeOU: ¤t.BoolTrue, + }, + } + instance.Namespace = namespace + instance.Name = "testorderer2node0" + + err := client.Create(context.TODO(), instance) + Expect(err).NotTo(HaveOccurred()) + }) + + Context("parses orderer msp", func() { + BeforeEach(func() { + ordererinit, err := ordererInit.GetInitOrderer(instance, "") + Expect(err).NotTo(HaveOccurred()) + + oconfig, err := ordererconfig.ReadOrdererFile("../../defaultconfig/orderer/orderer.yaml") + Expect(err).NotTo(HaveOccurred()) + + ordererinit.Config = oconfig + + err = orderer.InitializeCreate(instance, ordererinit) + Expect(err).NotTo(HaveOccurred()) + }) + + It("gets ecert crypto", func() { + By("creating a secret containing admin certs", func() { + secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "ecert-testorderer2node0-admincerts", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(len(secret.Data)).To(Equal(1)) + certBytes := secret.Data["admincert-0.pem"] + VerifyCertData(certBytes) + }) + + By("creating a secret containing ca root certs", func() { + secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "ecert-testorderer2node0-cacerts", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(len(secret.Data)).To(Equal(1)) + certBytes := secret.Data["cacert-0.pem"] + VerifyCertData(certBytes) + }) + + By("creating a secret containing ca intermediate certs", func() { + secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "ecert-testorderer2node0-intercerts", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(len(secret.Data)).To(Equal(1)) + certBytes := secret.Data["intercert-0.pem"] + VerifyCertData(certBytes) + }) + + By("creating a secret containing signed cert", func() { + secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "ecert-testorderer2node0-signcert", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(len(secret.Data)).To(Equal(1)) + certBytes := secret.Data["cert.pem"] + VerifyCertData(certBytes) + }) + + By("creating a secret containing private key", 
func() {
+					secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "ecert-testorderer2node0-keystore", metav1.GetOptions{})
+					Expect(err).NotTo(HaveOccurred())
+					Expect(len(secret.Data)).To(Equal(1))
+					keyBytes := secret.Data["key.pem"]
+					VerifyKeyData(keyBytes)
+				})
+
+				By("creating a secret containing TLS ca root certs", func() {
+					secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "tls-testorderer2node0-cacerts", metav1.GetOptions{})
+					Expect(err).NotTo(HaveOccurred())
+					Expect(len(secret.Data)).To(Equal(1))
+					certBytes := secret.Data["cacert-0.pem"]
+					VerifyCertData(certBytes)
+				})
+
+				By("creating a secret containing TLS ca intermediate certs", func() {
+					secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "tls-testorderer2node0-intercerts", metav1.GetOptions{})
+					Expect(err).NotTo(HaveOccurred())
+					Expect(len(secret.Data)).To(Equal(1))
+					certBytes := secret.Data["intercert-0.pem"]
+					VerifyCertData(certBytes)
+				})
+
+				By("creating a secret containing TLS signed cert", func() {
+					secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "tls-testorderer2node0-signcert", metav1.GetOptions{})
+					Expect(err).NotTo(HaveOccurred())
+					Expect(len(secret.Data)).To(Equal(1))
+					certBytes := secret.Data["cert.pem"]
+					VerifyCertData(certBytes)
+				})
+
+				By("creating a secret containing TLS private key", func() {
+					secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "tls-testorderer2node0-keystore", metav1.GetOptions{})
+					Expect(err).NotTo(HaveOccurred())
+					Expect(len(secret.Data)).To(Equal(1))
+					keyBytes := secret.Data["key.pem"]
+					VerifyKeyData(keyBytes)
+				})
+			})
+		})
+	})
+
+	Context("enrollment spec", func() {
+		var (
+			enrollment *current.Enrollment
+		)
+
+		BeforeEach(func() {
+			enrollment = &current.Enrollment{
+				CAHost: "localhost",
+				CAPort: "7055",
+				EnrollID: "admin",
+				EnrollSecret: "adminpw",
+				AdminCerts: []string{testcert},
+				CATLS: &current.CATLS{
+					CACert: testcert,
+				},
+			}
+
+			instance = &current.IBPOrderer{
+				Spec: current.IBPOrdererSpec{
+					Secret: &current.SecretSpec{
+						Enrollment: &current.EnrollmentSpec{
+							Component: enrollment,
+							TLS: enrollment,
+						},
+					},
+					DisableNodeOU: &current.BoolTrue,
+				},
+			}
+			instance.Namespace = namespace
+			instance.Name = "testorderer1node0"
+
+			err := client.Create(context.TODO(), instance)
+			Expect(err).NotTo(HaveOccurred())
+		})
+
+		AfterEach(func() {
+			err = os.RemoveAll(testDirOrderer)
+			Expect(err).NotTo(HaveOccurred())
+		})
+
+		Context("enrolls orderer with fabric ca server", func() {
+			BeforeEach(func() {
+				ordererinit, err := ordererInit.GetInitOrderer(instance, testDirOrderer)
+				Expect(err).NotTo(HaveOccurred())
+
+				oconfig, err := ordererconfig.ReadOrdererFile("../../defaultconfig/orderer/orderer.yaml")
+				Expect(err).NotTo(HaveOccurred())
+
+				ordererinit.Config = oconfig
+
+				err = orderer.InitializeCreate(instance, ordererinit)
+				Expect(err).NotTo(HaveOccurred())
+			})
+
+			It("gets enrollment crypto", func() {
+				By("creating a secret containing ca root certs", func() {
+					secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "ecert-testorderer1node0-cacerts", metav1.GetOptions{})
+					Expect(err).NotTo(HaveOccurred())
+					Expect(len(secret.Data)).To(Equal(1))
+					certBytes := secret.Data["cacert-0.pem"]
+					VerifyCertData(certBytes)
+				})
+
+				By("creating a secret containing ca intermediate certs", func() {
+					secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "ecert-testorderer1node0-intercerts", metav1.GetOptions{})
+					Expect(err).NotTo(HaveOccurred())
+
Expect(len(secret.Data)).To(Equal(1)) + certBytes := secret.Data["intercert-0.pem"] + VerifyCertData(certBytes) + }) + + By("creating a secret containing signed cert", func() { + secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "ecert-testorderer1node0-signcert", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(len(secret.Data)).To(Equal(1)) + certBytes := secret.Data["cert.pem"] + VerifyCertData(certBytes) + }) + + By("creating a secret containing private key", func() { + secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "ecert-testorderer1node0-keystore", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(len(secret.Data)).To(Equal(1)) + keyBytes := secret.Data["key.pem"] + VerifyKeyData(keyBytes) + }) + + By("creating a secret containing TLS ca root certs", func() { + secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "tls-testorderer1node0-cacerts", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(len(secret.Data)).To(Equal(1)) + certBytes := secret.Data["cacert-0.pem"] + VerifyCertData(certBytes) + }) + + By("creating a secret containing TLS ca intermediate certs", func() { + secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "tls-testorderer1node0-intercerts", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(len(secret.Data)).To(Equal(1)) + certBytes := secret.Data["intercert-0.pem"] + VerifyCertData(certBytes) + }) + + By("creating a secret containing TLS signed cert", func() { + secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "tls-testorderer1node0-signcert", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(len(secret.Data)).To(Equal(1)) + certBytes := secret.Data["cert.pem"] + VerifyCertData(certBytes) + }) + + By("creating a secret containing TLS private key", func() { + secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "tls-testorderer1node0-keystore", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(len(secret.Data)).To(Equal(1)) + keyBytes := secret.Data["key.pem"] + VerifyKeyData(keyBytes) + }) + }) + }) + }) +}) diff --git a/integration/init/peer_test.go b/integration/init/peer_test.go new file mode 100644 index 00000000..b726213b --- /dev/null +++ b/integration/init/peer_test.go @@ -0,0 +1,337 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package init + +import ( + "context" + "crypto/x509" + "encoding/pem" + "os" + "path/filepath" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + operatorconfig "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/enroller" + initializer "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer" + peerinit "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer" + config "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer/config/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/validator" + basepeer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/peer" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/peer/mocks" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + testDir = "peer-init-test" + testcert = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNiekNDQWhhZ0F3SUJBZ0lVUE1MTUZ3cmMwZUV2ZlhWV3FEN0pCVnNrdVQ4d0NnWUlLb1pJemowRUF3SXcKYURFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJrd0Z3WURWUVFERXhCbVlXSnlhV010ClkyRXRjMlZ5ZG1WeU1CNFhEVEl3TVRBd09ERTNNelF3TUZvWERUSTFNVEF3TnpFM016UXdNRm93YnpFTE1Ba0cKQTFVRUJoTUNWVk14RnpBVkJnTlZCQWdURGs1dmNuUm9JRU5oY205c2FXNWhNUlF3RWdZRFZRUUtFd3RJZVhCbApjbXhsWkdkbGNqRVBNQTBHQTFVRUN4TUdSbUZpY21sak1TQXdIZ1lEVlFRREV4ZFRZV0ZrY3kxTllXTkNiMjlyCkxWQnlieTVzYjJOaGJEQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJLcHdXMTNsY2hBbXBuVlUKbWZXUi9TYXR5b3hSYkpZL1ZtZDQ3RlZtVFRRelA2b3phczlrdzdZZFU4cHV1U0JSWlV5c2paS29nNlpJaFAxaQpwcmt0VmlHamdaWXdnWk13RGdZRFZSMFBBUUgvQkFRREFnT29NQjBHQTFVZEpRUVdNQlFHQ0NzR0FRVUZCd01CCkJnZ3JCZ0VGQlFjREFqQU1CZ05WSFJNQkFmOEVBakFBTUIwR0ExVWREZ1FXQkJRQVJWTlVRU0dCVEJvbmhTa3gKSDNVK3VtYlg5akFmQmdOVkhTTUVHREFXZ0JSWkdVRktPNk9qL2NXY29vUFVxM1p1blBUeWpqQVVCZ05WSFJFRQpEVEFMZ2dsc2IyTmhiR2h2YzNRd0NnWUlLb1pJemowRUF3SURSd0F3UkFJZ2ExZk9Od3VicWFlVWlPNGdhVjZICld1QW9TQ1haU2NTNWNkWEo1WUJER2djQ0lGNUNPQVNzekZJbEJBSTJ1VnltaHVhWnlyVFJIVEZHUzJ5OHBPMWcKSG5VNgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==" + defaultConfigs = "../../defaultconfig" +) + +var _ = Describe("Peer init", func() { + var ( + err error + peerInit *initializer.Initializer + instance *current.IBPPeer + peer *basepeer.Peer + ) + + BeforeEach(func() { + peer = &basepeer.Peer{ + Client: client, + Initializer: peerInit, + DeploymentManager: &mocks.DeploymentManager{}, + Config: &operatorconfig.Config{ + PeerInitConfig: &peerinit.Config{}, + }, + } + + config := &initializer.Config{ + OUFile: filepath.Join(defaultConfigs, "peer/ouconfig.yaml"), + CorePeerFile: filepath.Join(defaultConfigs, "peer/core.yaml"), + CorePeerV2File: filepath.Join(defaultConfigs, "peer/v2/core.yaml"), + } + validator := &validator.Validator{ + Client: client, + } + + peerInit = initializer.New(config, scheme, client, peer.GetLabels, validator, enroller.HSMEnrollJobTimeouts{}) + peer.Initializer = peerInit + }) + + Context("msp spec", func() { + var ( + msp *current.MSP + ) + + BeforeEach(func() { + msp = ¤t.MSP{ + KeyStore: "LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JR0hBZ0VBTUJNR0J5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEJHMHdhd0lCQVFRZ3hRUXdSVFFpVUcwREo1UHoKQTJSclhIUEtCelkxMkxRa0MvbVlveWo1bEhDaFJBTkNBQVN5bE1YLzFqdDlmUGt1RTZ0anpvSTlQbGt4LzZuVQpCMHIvMU56TTdrYnBjUk8zQ3RIeXQ2TXlQR21FOUZUN29pYXphU3J1TW9JTDM0VGdBdUpIOU9ZWQotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg==", + SignCerts: testcert, + AdminCerts: []string{testcert}, + CACerts: 
[]string{testcert}, + IntermediateCerts: []string{testcert}, + } + + instance = ¤t.IBPPeer{ + Spec: current.IBPPeerSpec{ + Secret: ¤t.SecretSpec{ + MSP: ¤t.MSPSpec{ + Component: msp, + TLS: msp, + }, + }, + DisableNodeOU: ¤t.BoolTrue, + }, + } + instance.Namespace = namespace + instance.Name = "testpeer2" + + err := client.Create(context.TODO(), instance) + Expect(err).NotTo(HaveOccurred()) + + }) + + Context("parses peer msp", func() { + BeforeEach(func() { + peerinit, err := peerInit.GetInitPeer(instance, "") + Expect(err).NotTo(HaveOccurred()) + peerinit.Config = &config.Core{} + + err = peer.InitializeCreate(instance, peerinit) + Expect(err).NotTo(HaveOccurred()) + }) + + It("gets ecert crypto", func() { + By("creating a secret containing admin certs", func() { + secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "ecert-testpeer2-admincerts", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(len(secret.Data)).To(Equal(1)) + certBytes := secret.Data["admincert-0.pem"] + VerifyCertData(certBytes) + }) + + By("creating a secret containing ca root certs", func() { + secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "ecert-testpeer2-cacerts", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(len(secret.Data)).To(Equal(1)) + certBytes := secret.Data["cacert-0.pem"] + VerifyCertData(certBytes) + }) + + By("creating a secret containing ca intermediate certs", func() { + secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "ecert-testpeer2-intercerts", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(len(secret.Data)).To(Equal(1)) + certBytes := secret.Data["intercert-0.pem"] + VerifyCertData(certBytes) + }) + + By("creating a secret containing signed cert", func() { + secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "ecert-testpeer2-signcert", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(len(secret.Data)).To(Equal(1)) + certBytes := secret.Data["cert.pem"] + VerifyCertData(certBytes) + }) + + By("creating a secret containing private key", func() { + secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "ecert-testpeer2-keystore", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(len(secret.Data)).To(Equal(1)) + keyBytes := secret.Data["key.pem"] + VerifyKeyData(keyBytes) + }) + + By("creating a secret containing TLS ca root certs", func() { + secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "tls-testpeer2-cacerts", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(len(secret.Data)).To(Equal(1)) + certBytes := secret.Data["cacert-0.pem"] + VerifyCertData(certBytes) + }) + + By("creating a secret containing TLS ca intermediate certs", func() { + secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "tls-testpeer2-intercerts", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(len(secret.Data)).To(Equal(1)) + certBytes := secret.Data["intercert-0.pem"] + VerifyCertData(certBytes) + }) + + By("creating a secret containing TLS signed cert", func() { + secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "tls-testpeer2-signcert", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(len(secret.Data)).To(Equal(1)) + certBytes := secret.Data["cert.pem"] + VerifyCertData(certBytes) + }) + + By("creating a secret containing TLS private key", func() { + secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), 
"tls-testpeer2-keystore", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(len(secret.Data)).To(Equal(1)) + keyBytes := secret.Data["key.pem"] + VerifyKeyData(keyBytes) + }) + }) + }) + }) + + Context("enrollment spec", func() { + var ( + enrollment *current.Enrollment + ) + + BeforeEach(func() { + enrollment = ¤t.Enrollment{ + CAHost: "localhost", + CAPort: "7055", + EnrollID: "admin", + EnrollSecret: "adminpw", + AdminCerts: []string{testcert}, + CATLS: ¤t.CATLS{ + CACert: testcert, + }, + } + + instance = ¤t.IBPPeer{ + Spec: current.IBPPeerSpec{ + Secret: ¤t.SecretSpec{ + Enrollment: ¤t.EnrollmentSpec{ + Component: enrollment, + TLS: enrollment, + }, + }, + DisableNodeOU: ¤t.BoolTrue, + }, + } + instance.Namespace = namespace + instance.Name = "testpeer1" + + err := client.Create(context.TODO(), instance) + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + err = os.RemoveAll(testDir) + Expect(err).NotTo(HaveOccurred()) + }) + + Context("enrolls peer with fabric ca server", func() { + BeforeEach(func() { + peerinit, err := peerInit.GetInitPeer(instance, testDir) + Expect(err).NotTo(HaveOccurred()) + peerinit.Config = &config.Core{} + + err = peer.InitializeCreate(instance, peerinit) + Expect(err).NotTo(HaveOccurred()) + }) + + It("gets enrollment crypto", func() { + By("creating a secret containing admin certs", func() { + secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "ecert-testpeer1-admincerts", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(len(secret.Data)).To(Equal(1)) + certBytes := secret.Data["admincert-0.pem"] + VerifyCertData(certBytes) + }) + + By("creating a secret containing ca root certs", func() { + secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "ecert-testpeer1-cacerts", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(len(secret.Data)).To(Equal(1)) + certBytes := secret.Data["cacert-0.pem"] + VerifyCertData(certBytes) + }) + + By("creating a secret containing ca intermediate certs", func() { + secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "ecert-testpeer1-intercerts", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(len(secret.Data)).To(Equal(1)) + certBytes := secret.Data["intercert-0.pem"] + VerifyCertData(certBytes) + }) + + By("creating a secret containing signed cert", func() { + secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "ecert-testpeer1-signcert", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(len(secret.Data)).To(Equal(1)) + certBytes := secret.Data["cert.pem"] + VerifyCertData(certBytes) + }) + + By("creating a secret containing private key", func() { + secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "ecert-testpeer1-keystore", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(len(secret.Data)).To(Equal(1)) + keyBytes := secret.Data["key.pem"] + VerifyKeyData(keyBytes) + }) + + By("creating a secret containing TLS ca root certs", func() { + secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "tls-testpeer1-cacerts", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(len(secret.Data)).To(Equal(1)) + certBytes := secret.Data["cacert-0.pem"] + VerifyCertData(certBytes) + }) + + By("creating a secret containing TLS ca intermediate certs", func() { + secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "tls-testpeer1-intercerts", metav1.GetOptions{}) + 
Expect(err).NotTo(HaveOccurred()) + Expect(len(secret.Data)).To(Equal(1)) + certBytes := secret.Data["intercert-0.pem"] + VerifyCertData(certBytes) + }) + + By("creating a secret containing TLS signed cert", func() { + secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "tls-testpeer1-signcert", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(len(secret.Data)).To(Equal(1)) + certBytes := secret.Data["cert.pem"] + VerifyCertData(certBytes) + }) + + By("creating a secret containing TLS private key", func() { + secret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "tls-testpeer1-keystore", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(len(secret.Data)).To(Equal(1)) + keyBytes := secret.Data["key.pem"] + VerifyKeyData(keyBytes) + }) + }) + }) + }) +}) + +func VerifyKeyData(data []byte) { + block, _ := pem.Decode(data) + Expect(block).NotTo(BeNil()) + _, err := x509.ParsePKCS8PrivateKey(block.Bytes) + Expect(err).NotTo(HaveOccurred()) +} + +func VerifyCertData(data []byte) { + block, _ := pem.Decode(data) + Expect(block).NotTo(BeNil()) + _, err := x509.ParseCertificate(block.Bytes) + Expect(err).NotTo(HaveOccurred()) +} diff --git a/integration/integration.go b/integration/integration.go new file mode 100644 index 00000000..2e45ba50 --- /dev/null +++ b/integration/integration.go @@ -0,0 +1,491 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package integration + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "time" + + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + ibpclient "github.com/IBM-Blockchain/fabric-operator/pkg/client" + "github.com/IBM-Blockchain/fabric-operator/pkg/command" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" +) + +const ( + TestAutomation1IngressDomain = "vcap.me" +) + +var ( + defaultConfigs = "../../defaultconfig" + defaultDef = "../../definitions" + + operatorCfg *config.Config + operatorContext context.Context + operatorCancelFunc context.CancelFunc +) + +type Config struct { + OperatorServiceAccount string + OperatorRole string + OperatorRoleBinding string + OperatorDeployment string + OrdererSecret string + PeerSecret string + ConsoleTLSSecret string +} + +func SetupSignalHandler() context.Context { + operatorContext, operatorCancelFunc = context.WithCancel(context.Background()) + return operatorContext +} + +func Setup(ginkgoWriter io.Writer, cfg *Config, suffix, pathToDefaultDir string) (string, *kubernetes.Clientset, *ibpclient.IBPClient, error) { + // Set up a signal handler Context to allow a graceful shutdown of the operator. + SetupSignalHandler() + + var err error + + if pathToDefaultDir != "" { + defaultConfigs = filepath.Join(pathToDefaultDir, "defaultconfig") + defaultDef = filepath.Join(pathToDefaultDir, "definitions") + } + operatorCfg = getOperatorCfg() + + wd, err := os.Getwd() + if err != nil { + return "", nil, nil, err + } + fmt.Fprintf(ginkgoWriter, "Working directory: %s\n", wd) + + namespace := os.Getenv("OPERATOR_NAMESPACE") + if namespace == "" { + namespace = "operatortest" + } + if suffix != "" { + namespace = fmt.Sprintf("%s%s", namespace, suffix) + } + + fmt.Fprintf(ginkgoWriter, "Namespace set to '%s'\n", namespace) + + setupConfig, err := GetConfig() + if err != nil { + return "", nil, nil, err + } + + fmt.Fprintf(ginkgoWriter, "Setup config %+v\n", setupConfig) + + kclient, ibpCRClient, err := InitClients(setupConfig) + if err != nil { + return "", nil, nil, err + } + + err = os.Setenv("CLUSTERTYPE", "K8S") + if err != nil { + return "", nil, nil, err + } + err = os.Setenv("WATCH_NAMESPACE", namespace) + if err != nil { + return "", nil, nil, err + } + + err = CleanupNamespace(ginkgoWriter, kclient, namespace) + if err != nil { + return "", nil, nil, err + } + + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + }, + } + + _, err = kclient.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{}) + if err != nil { + return "", nil, nil, err + } + fmt.Fprintf(ginkgoWriter, "Namespace '%s' created\n", namespace) + + // Set up an image pull secret if a docker config json has been specified + if setupConfig.DockerConfigJson != "" { + fmt.Fprintf(ginkgoWriter, "Creating 'regcred' image pull secret for DOCKERCONFIGJSON") + + err = CreatePullSecret(kclient, "regcred", namespace, setupConfig.DockerConfigJson) + if err != nil { + return "", nil, nil, err + } + } + + err = DeployOperator(ginkgoWriter, operatorContext, cfg, kclient, namespace) + if err != nil { + return "", 
nil, nil, err + } + + return namespace, kclient, ibpCRClient, nil +} + +func deleteNamespace(ginkgoWriter io.Writer, kclient *kubernetes.Clientset, namespace string) error { + var zero int64 = 0 + policy := metav1.DeletePropagationForeground + deleteOptions := metav1.DeleteOptions{ + GracePeriodSeconds: &zero, + PropagationPolicy: &policy, + } + fmt.Fprintf(ginkgoWriter, "Deleting namespace '%s' with options %s\n", namespace, &deleteOptions) + return kclient.CoreV1().Namespaces().Delete(context.TODO(), namespace, deleteOptions) +} + +type SetupConfig struct { + DockerConfigJson string + KubeConfig string +} + +func GetConfig() (*SetupConfig, error) { + return &SetupConfig{ + DockerConfigJson: os.Getenv("DOCKERCONFIGJSON"), + KubeConfig: os.Getenv("KUBECONFIG_PATH"), + }, nil +} + +func InitClients(setupConfig *SetupConfig) (*kubernetes.Clientset, *ibpclient.IBPClient, error) { + config, err := rest.InClusterConfig() + if err != nil { + // Not running in a cluster, get kube config from KUBECONFIG env var + kubeConfigPath := setupConfig.KubeConfig + config, err = clientcmd.BuildConfigFromFlags("", kubeConfigPath) + if err != nil { + fmt.Println("error:", err) + return nil, nil, err + } + } + + kclient, err := kubernetes.NewForConfig(config) + if err != nil { + return nil, nil, err + } + + client, err := ibpclient.New(config) + if err != nil { + return nil, nil, err + } + + return kclient, client, nil +} + +func DeployOperator(ginkgoWriter io.Writer, signal context.Context, cfg *Config, kclient *kubernetes.Clientset, namespace string) error { + fmt.Fprintf(ginkgoWriter, "Deploying operator in namespace '%s'\n", namespace) + // Create service account for operator + sa, err := util.GetServiceAccountFromFile(cfg.OperatorServiceAccount) + if err != nil { + return err + } + _, err = kclient.CoreV1().ServiceAccounts(namespace).Create(context.TODO(), sa, metav1.CreateOptions{}) + if err != nil { + return err + } + + // Create cluster role with permissions required by operator + role, err := util.GetClusterRoleFromFile(cfg.OperatorRole) + if err != nil { + return err + } + _, err = kclient.RbacV1().ClusterRoles().Create(context.TODO(), role, metav1.CreateOptions{}) + if err != nil { + if !k8serrors.IsAlreadyExists(err) { + return err + } + } + + // Create role binding for operator's cluster role + roleBinding, err := util.GetClusterRoleBindingFromFile(cfg.OperatorRoleBinding) + if err != nil { + return err + } + + roleBinding.Name = fmt.Sprintf("operator-%s", namespace) + roleBinding.Subjects[0].Namespace = namespace + + _, err = kclient.RbacV1().ClusterRoleBindings().Create(context.TODO(), roleBinding, metav1.CreateOptions{}) + if err != nil { + if !k8serrors.IsAlreadyExists(err) { + return err + } + } + + // Create resource secrets + ordererSecret, err := util.GetSecretFromFile(cfg.OrdererSecret) + if err != nil { + return err + } + _, err = kclient.CoreV1().Secrets(namespace).Create(context.TODO(), ordererSecret, metav1.CreateOptions{}) + if err != nil { + return err + } + + // Peer 1 secret + peerSecret, err := util.GetSecretFromFile(cfg.PeerSecret) + if err != nil { + return err + } + _, err = kclient.CoreV1().Secrets(namespace).Create(context.TODO(), peerSecret, metav1.CreateOptions{}) + if err != nil { + return err + } + + // Peer 2 secret + peerSecret.Name = "ibppeer2-secret" + _, err = kclient.CoreV1().Secrets(namespace).Create(context.TODO(), peerSecret, metav1.CreateOptions{}) + if err != nil { + return err + } + + consoleTLSSecret, err := util.GetSecretFromFile(cfg.ConsoleTLSSecret) + if 
err != nil { + return err + } + _, err = kclient.CoreV1().Secrets(namespace).Create(context.TODO(), consoleTLSSecret, metav1.CreateOptions{}) + if err != nil { + return err + } + + err = command.OperatorWithSignal(operatorCfg, signal, false, true) + if err != nil { + return err + } + + fmt.Fprintf(ginkgoWriter, "Done deploying operator in namespace '%s'\n", namespace) + + return nil +} + +func Cleanup(ginkgoWriter io.Writer, kclient *kubernetes.Clientset, namespace string) error { + + // The operator must halt before the namespace can be deleted in the foreground. + ShutdownOperator(ginkgoWriter) + + err := CleanupNamespace(ginkgoWriter, kclient, namespace) + if err != nil { + return err + } + + return nil +} + +func ShutdownOperator(ginkgoWriter io.Writer) { + if operatorContext != nil { + fmt.Fprintf(ginkgoWriter, "Stopping operator\n") + operatorContext.Done() + operatorCancelFunc() + } +} + +func CleanupNamespace(ginkgoWriter io.Writer, kclient *kubernetes.Clientset, namespace string) error { + err := deleteNamespace(ginkgoWriter, kclient, namespace) + if err != nil { + if k8serrors.IsNotFound(err) { + return nil // Namespace does not exist, don't need to wait for deletion to complete + } + } + + opts := metav1.ListOptions{} + watchNamespace, err := kclient.CoreV1().Namespaces().Watch(context.TODO(), opts) + if err != nil { + return err + } + + fmt.Fprintf(ginkgoWriter, "Waiting for namespace deletion\n") + for { + resultChan := <-watchNamespace.ResultChan() + if resultChan.Type == watch.Deleted { + ns := resultChan.Object.(*corev1.Namespace) + if ns.Name == namespace { + break + } + } + } + fmt.Fprintf(ginkgoWriter, "Done deleting namespace '%s'\n", namespace) + return nil +} + +func DeleteNamespace(ginkgoWriter io.Writer, kclient *kubernetes.Clientset, namespace string) error { + err := deleteNamespace(ginkgoWriter, kclient, namespace) + if err != nil { + if k8serrors.IsNotFound(err) { + return nil // Namespace does not exist, don't need to wait for deletion to complete + } + } + + return nil +} + +func CreatePullSecret(kclient *kubernetes.Clientset, name string, namespace string, dockerConfigJson string) error { + b, err := base64.StdEncoding.DecodeString(dockerConfigJson) + if err != nil { + return err + } + + pullSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Data: map[string][]byte{ + ".dockerconfigjson": b, + }, + Type: corev1.SecretTypeDockerConfigJson, + } + + _, err = kclient.CoreV1().Secrets(namespace).Create(context.TODO(), pullSecret, metav1.CreateOptions{}) + if err != nil { + return err + } + + return nil +} + +func ClearOperatorConfig(kclient *kubernetes.Clientset, namespace string) error { + err := kclient.CoreV1().ConfigMaps(namespace).Delete(context.TODO(), "operator-config", *metav1.NewDeleteOptions(0)) + if !k8serrors.IsNotFound(err) { + return err + } + return nil +} + +func ResilientPatch(kclient *ibpclient.IBPClient, name, namespace, kind string, retry int, into client.Object, patch func(i client.Object)) error { + + for i := 0; i < retry; i++ { + err := resilientPatch(kclient, name, namespace, kind, into, patch) + if err != nil { + if i == retry { + return err + } + if k8serrors.IsConflict(err) { + time.Sleep(2 * time.Second) + continue + } + return err + } + } + + return nil +} + +func resilientPatch(kclient *ibpclient.IBPClient, name, namespace, kind string, into client.Object, patch func(i client.Object)) error { + result := kclient.Get().Namespace(namespace).Resource(kind).Name(name).Do(context.TODO()) + if result.Error() != 
nil { + return result.Error() + } + + err := result.Into(into) + if err != nil { + return err + } + + patch(into) + bytes, err := json.Marshal(into) + if err != nil { + return err + } + + result = kclient.Patch(types.MergePatchType).Namespace(namespace).Resource(kind).Name(name).Body(bytes).Do(context.TODO()) + if result.Error() != nil { + return result.Error() + } + + return nil +} + +func CreateOperatorConfigMapFromFile(namespace string, kclient *kubernetes.Clientset, file string) error { + configData, err := ioutil.ReadFile(filepath.Clean(file)) + if err != nil { + return err + } + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "operator", + Namespace: namespace, + }, + Data: map[string]string{ + "config.yaml": string(configData), + }, + } + + _, err = kclient.CoreV1().ConfigMaps(namespace).Create(context.TODO(), cm, metav1.CreateOptions{}) + if err != nil { + return err + } + + return nil +} + +// CreateConfigMap creates config map +func CreateConfigMap(kclient *kubernetes.Clientset, config interface{}, key, name, namespace string) error { + configBytes, err := yaml.Marshal(config) + if err != nil { + return err + } + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Data: map[string]string{ + key: string(configBytes), + }, + } + + _, err = kclient.CoreV1().ConfigMaps(namespace).Create(context.TODO(), cm, metav1.CreateOptions{}) + if err != nil { + return err + } + + return nil +} + +func OperatorCfg() *config.Config { + return getOperatorCfg() +} + +func getOperatorCfg() *config.Config { + defaultPeerDef := filepath.Join(defaultDef, "peer") + defaultCADef := filepath.Join(defaultDef, "ca") + defaultOrdererDef := filepath.Join(defaultDef, "orderer") + defaultConsoleDef := filepath.Join(defaultDef, "console") + return GetOperatorConfig(defaultConfigs, defaultCADef, defaultPeerDef, defaultOrdererDef, defaultConsoleDef) +} diff --git a/integration/kind-config.yaml b/integration/kind-config.yaml new file mode 100644 index 00000000..873ce943 --- /dev/null +++ b/integration/kind-config.yaml @@ -0,0 +1,23 @@ +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: + - role: control-plane + kubeadmConfigPatches: + - | + kind: InitConfiguration + nodeRegistration: + kubeletExtraArgs: + node-labels: "ingress-ready=true" + extraPortMappings: + - containerPort: 80 + hostPort: 80 + protocol: TCP + listenAddress: 127.0.0.1 + - containerPort: 443 + hostPort: 443 + protocol: TCP + listenAddress: 127.0.0.1 +containerdConfigPatches: + - |- + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:${reg_port}"] + endpoint = ["http://${reg_name}:${reg_port}"] diff --git a/integration/migration/fabric/fabric_suite_test.go b/integration/migration/fabric/fabric_suite_test.go new file mode 100644 index 00000000..902769b0 --- /dev/null +++ b/integration/migration/fabric/fabric_suite_test.go @@ -0,0 +1,239 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package fabric_test + +import ( + "fmt" + "net/url" + "os" + "path/filepath" + "strings" + "testing" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/onsi/gomega/gexec" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/integration" + "github.com/IBM-Blockchain/fabric-operator/integration/helper" + ibpclient "github.com/IBM-Blockchain/fabric-operator/pkg/client" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" +) + +func TestFabric(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Fabric Suite") +} + +const ( + defaultConfigs = "../../../defaultconfig" + defaultPeerDef = "../../../definitions/peer" + defaultCADef = "../../../definitions/ca" + defaultOrdererDef = "../../../definitions/orderer" + defaultConsoleDef = "../../../definitions/console" + FabricBinaryVersion = "2.2.3" + FabricCABinaryVersion = "1.5.1" + domain = "vcap.me" +) + +var ( + namespaceSuffix = "migration" + + namespace string + kclient *kubernetes.Clientset + ibpCRClient *ibpclient.IBPClient + testFailed bool + wd string // Working directory of test +) + +var ( + err error + + org1ca *helper.CA + caHost string + tlsCert []byte +) + +var _ = BeforeSuite(func() { + SetDefaultEventuallyTimeout(300 * time.Second) + SetDefaultEventuallyPollingInterval(time.Second) + + wd, err = os.Getwd() + Expect(err).NotTo(HaveOccurred()) + fmt.Fprintf(GinkgoWriter, "Working directory: %s\n", wd) + + cfg := &integration.Config{ + OperatorServiceAccount: "../../../config/rbac/service_account.yaml", + OperatorRole: "../../../config/rbac/role.yaml", + OperatorRoleBinding: "../../../config/rbac/role_binding.yaml", + OperatorDeployment: "../../../testdata/deploy/operator.yaml", + OrdererSecret: "../../../testdata/deploy/orderer/secret.yaml", + PeerSecret: "../../../testdata/deploy/peer/secret.yaml", + ConsoleTLSSecret: "../../../testdata/deploy/console/tlssecret.yaml", + } + + namespace, kclient, ibpCRClient, err = integration.Setup(GinkgoWriter, cfg, namespaceSuffix, "../../..") + Expect(err).NotTo(HaveOccurred()) + + downloadBinaries() + startCA() + registerAndEnrollIDs() +}) + +var _ = AfterSuite(func() { + + if strings.ToLower(os.Getenv("SAVE_TEST")) == "true" { + return + } + + integration.Cleanup(GinkgoWriter, kclient, namespace) +}) + +func startCA() { + By("starting CA pod", func() { + org1ca = Org1CA() + helper.CreateCA(ibpCRClient, org1ca.CR) + + Eventually(org1ca.PodIsRunning).Should((Equal(true))) + }) + + profile, err := org1ca.ConnectionProfile() + Expect(err).NotTo(HaveOccurred()) + + tlsCert, err = util.Base64ToBytes(profile.TLS.Cert) + Expect(err).NotTo(HaveOccurred()) + + By("performing CA health check", func() { + Eventually(func() bool { + url := fmt.Sprintf("https://%s/cainfo", org1ca.Address()) + fmt.Fprintf(GinkgoWriter, "Waiting for CA health check to pass for '%s' at url: %s\n", org1ca.Name, url) + return org1ca.HealthCheck(url, tlsCert) + }).Should(Equal(true)) + }) + + org1ca.TLSToFile(tlsCert) + + caURL, err := url.Parse(profile.Endpoints.API) + Expect(err).NotTo(HaveOccurred()) + caHost = strings.Split(caURL.Host, ":")[0] +} + +func registerAndEnrollIDs() { + By("enrolling ca admin", func() { + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, 
org1ca.Name, "org1ca-admin")) + sess, err := helper.StartSession( + org1ca.Enroll("admin", "adminpw"), + "Enroll CA Admin", + ) + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + }) + + By("registering peer identity", func() { + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, org1ca.Name, "org1ca-admin")) + sess, err := helper.StartSession( + org1ca.Register(peerUsername, "peerpw", "peer"), + "Register User", + ) + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + }) + + By("registering orderer identity", func() { + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, org1ca.Name, "org1ca-admin")) + sess, err := helper.StartSession( + org1ca.Register(ordererUsername, "ordererpw", "orderer"), + "Register User", + ) + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + }) +} + +func downloadBinaries() { + os.Setenv("FABRIC_VERSION", FabricBinaryVersion) + os.Setenv("FABRIC_CA_VERSION", FabricCABinaryVersion) + sess, err := helper.StartSession( + helper.GetCommand(helper.AbsPath(wd, "../../../scripts/download_binaries.sh")), + "Download Binaries", + ) + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) +} + +func Org1CA() *helper.CA { + cr := &current.IBPCA{ + ObjectMeta: metav1.ObjectMeta{ + Name: "org1ca", + Namespace: namespace, + }, + Spec: current.IBPCASpec{ + License: current.License{ + Accept: true, + }, + ImagePullSecrets: []string{"regcred"}, + Images: &current.CAImages{ + CAImage: integration.CaImage, + CATag: integration.CaTag, + CAInitImage: integration.InitImage, + CAInitTag: integration.InitTag, + }, + Resources: &current.CAResources{ + CA: &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("50m"), + corev1.ResourceMemory: resource.MustParse("100M"), + corev1.ResourceEphemeralStorage: resource.MustParse("100M"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("50m"), + corev1.ResourceMemory: resource.MustParse("100M"), + corev1.ResourceEphemeralStorage: resource.MustParse("1G"), + }, + }, + }, + Zone: "select", + Region: "select", + Domain: domain, + FabricVersion: integration.FabricCAVersion, + }, + } + + return &helper.CA{ + Domain: domain, + Name: cr.Name, + Namespace: namespace, + WorkingDir: wd, + CR: cr, + CRClient: ibpCRClient, + KClient: kclient, + NativeResourcePoller: integration.NativeResourcePoller{ + Name: cr.Name, + Namespace: namespace, + Client: kclient, + }, + } +} diff --git a/integration/migration/fabric/orderer_test.go b/integration/migration/fabric/orderer_test.go new file mode 100644 index 00000000..ae1aeeea --- /dev/null +++ b/integration/migration/fabric/orderer_test.go @@ -0,0 +1,194 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package fabric_test + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + + . 
"github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/integration" + "github.com/IBM-Blockchain/fabric-operator/integration/helper" + baseorderer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + ordererUsername = "orderer" +) + +var _ = Describe("Fabric orderer migration", func() { + var ( + node1 *helper.Orderer + ) + + BeforeEach(func() { + orderer := GetOrderer() + err := helper.CreateOrderer(ibpCRClient, orderer.CR) + Expect(err).NotTo(HaveOccurred()) + + node1 = &orderer.Nodes[0] + + By("starting orderer pod", func() { + Eventually(node1.PodIsRunning).Should((Equal(true))) + }) + }) + + AfterEach(func() { + // Set flag if a test fails + if CurrentGinkgoTestDescription().Failed { + testFailed = true + } + }) + + Context("migration from v1.4.x to v2.x", func() { + BeforeEach(func() { + result := ibpCRClient. + Get(). + Namespace(namespace). + Resource("ibporderers"). + Name(node1.Name). + Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + + ibporderer := &current.IBPOrderer{} + result.Into(ibporderer) + + ibporderer.Spec.Images.OrdererTag = integration.OrdererTag + ibporderer.Spec.FabricVersion = integration.FabricVersion + + bytes, err := json.Marshal(ibporderer) + Expect(err).NotTo(HaveOccurred()) + + // Update the orderer's CR spec + result = ibpCRClient. + Put(). + Namespace(namespace). + Resource("ibporderers"). + Name(node1.Name). + Body(bytes). + Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + }) + + It("terminates pod", func() { + Eventually(func() int { + return len(node1.GetRunningPods()) + }).Should((Equal(0))) + }) + + It("restarts pod", func() { + Eventually(node1.PodIsRunning).Should((Equal(true))) + }) + }) +}) + +func GetOrderer() *helper.Orderer { + enrollment := &current.EnrollmentSpec{ + Component: &current.Enrollment{ + CAHost: caHost, + CAPort: "443", + CAName: "ca", + CATLS: &current.CATLS{ + CACert: base64.StdEncoding.EncodeToString(tlsCert), + }, + EnrollID: ordererUsername, + EnrollSecret: "ordererpw", + }, + TLS: &current.Enrollment{ + CAHost: caHost, + CAPort: "443", + CAName: "tlsca", + CATLS: &current.CATLS{ + CACert: base64.StdEncoding.EncodeToString(tlsCert), + }, + EnrollID: ordererUsername, + EnrollSecret: "ordererpw", + }, + } + + cr := &current.IBPOrderer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ibporderer1", + Namespace: namespace, + }, + Spec: current.IBPOrdererSpec{ + License: current.License{ + Accept: true, + }, + ClusterSize: 1, + OrdererType: "etcdraft", + SystemChannelName: "testchainid", + OrgName: "orderermsp", + MSPID: "orderermsp", + ImagePullSecrets: []string{"regcred"}, + GenesisProfile: "Initial", + Domain: domain, + Images: &current.OrdererImages{ + GRPCWebImage: integration.GrpcwebImage, + GRPCWebTag: integration.GrpcwebTag, + OrdererImage: integration.OrdererImage, + OrdererTag: integration.Orderer14Tag, + OrdererInitImage: integration.InitImage, + OrdererInitTag: integration.InitTag, + }, + ClusterSecret: []*current.SecretSpec{ + &current.SecretSpec{ + Enrollment: enrollment, + }, + }, + FabricVersion: "1.4.12", + }, + } + + nodes := []helper.Orderer{ + helper.Orderer{ + Name: cr.Name + "node1", + Namespace: namespace, + CR: cr.DeepCopy(), + NodeName: fmt.Sprintf("%s%s%d", cr.Name, baseorderer.NODE, 1), + NativeResourcePoller: integration.NativeResourcePoller{ + Name: cr.Name + "node1", + Namespace: namespace, + Client: kclient, + }, + }, + } + 
nodes[0].CR.ObjectMeta.Name = cr.Name + "node1" + + return &helper.Orderer{ + Name: cr.Name, + Namespace: namespace, + CR: cr, + NodeName: fmt.Sprintf("%s-%s%d", cr.Name, baseorderer.NODE, 1), + NativeResourcePoller: integration.NativeResourcePoller{ + Name: cr.Name, + Namespace: namespace, + Client: kclient, + }, + Nodes: nodes, + } +} diff --git a/integration/migration/fabric/peer_test.go b/integration/migration/fabric/peer_test.go new file mode 100644 index 00000000..7186c66f --- /dev/null +++ b/integration/migration/fabric/peer_test.go @@ -0,0 +1,214 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package fabric_test + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/integration" + "github.com/IBM-Blockchain/fabric-operator/integration/helper" + "github.com/IBM-Blockchain/fabric-operator/version" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + peerUsername = "peer" +) + +var _ = Describe("Fabric peer migration", func() { + var ( + peer *helper.Peer + ) + + BeforeEach(func() { + peer = GetPeer() + err := helper.CreatePeer(ibpCRClient, peer.CR) + Expect(err).NotTo(HaveOccurred()) + + By("starting peer pod", func() { + Eventually(peer.PodIsRunning).Should((Equal(true))) + }) + }) + + AfterEach(func() { + // Set flag if a test fails + if CurrentGinkgoTestDescription().Failed { + testFailed = true + } + }) + + Context("migration from v1.4.x to v2.x peer", func() { + BeforeEach(func() { + result := ibpCRClient. + Get(). + Namespace(namespace). + Resource("ibppeers"). + Name(peer.Name). + Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + + ibppeer := &current.IBPPeer{} + result.Into(ibppeer) + + ibppeer.Spec.Images.PeerTag = integration.PeerTag + ibppeer.Spec.FabricVersion = version.V2_2_5 + + bytes, err := json.Marshal(ibppeer) + Expect(err).NotTo(HaveOccurred()) + + // Update the peer's CR spec + result = ibpCRClient. + Put(). + Namespace(namespace). + Resource("ibppeers"). + Name(ibppeer.Name). + Body(bytes). + Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + }) + + It("migrates", func() { + By("starting migration job", func() { + Eventually(func() bool { + dbmigrationJobName, err := helper.GetJobID(kclient, namespace, fmt.Sprintf("%s-dbmigration", peer.CR.Name)) + if err != nil { + return false + } + + _, err = kclient.BatchV1().Jobs(namespace). 
+ Get(context.TODO(), dbmigrationJobName, metav1.GetOptions{}) + if err != nil { + return false + } + return true + }).Should(Equal(true)) + }) + + By("starting peer pod", func() { + Eventually(func() int { + deps := peer.DeploymentList() + dep := deps.Items[0] + return len(dep.Spec.Template.Spec.Containers) + }).Should(Equal(4)) + Eventually(peer.PodIsRunning).Should((Equal(true))) + }) + + By("adding chaincode launcher container and removing dind", func() { + deps := peer.DeploymentList() + dep := deps.Items[0] + + containerNames := []string{} + for _, cont := range dep.Spec.Template.Spec.Containers { + containerNames = append(containerNames, cont.Name) + } + + Expect(containerNames).To(ContainElement("chaincode-launcher")) + Expect(containerNames).NotTo(ContainElement("dind")) + }) + }) + }) +}) + +// TODO:OSS +func GetPeer() *helper.Peer { + name := "ibppeer1" + cr := ¤t.IBPPeer{ + TypeMeta: metav1.TypeMeta{ + Kind: "IBPPeer", + APIVersion: "ibp.com/v1beta1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: current.IBPPeerSpec{ + License: current.License{ + Accept: true, + }, + MSPID: "test-peer-mspid", + Region: "select", + Zone: "select", + ImagePullSecrets: []string{"regcred"}, + Images: ¤t.PeerImages{ + // TODO: OSS + CouchDBImage: "ghcr.io/ibm-blockchain/couchdb", + CouchDBTag: "2.3.1-20210826-amd64", + // do not change dind tag, it is used for loading dind faster + DindImage: "ghcr.io/ibm-blockchain/dind", + DindTag: "noimages-amd64", + FluentdImage: "ghcr.io/ibm-blockchain/fluentd", + FluentdTag: "1.0.0-20210826-amd64", + GRPCWebImage: "ghcr.io/ibm-blockchain/grpcweb", + GRPCWebTag: "1.0.0-20210826-amd64", + PeerImage: "ghcr.io/ibm-blockchain/peer", + PeerTag: "1.4.12-20210826-amd64", + PeerInitImage: "ghcr.io/ibm-blockchain/init", + PeerInitTag: "1.0.0-20210826-amd64", + EnrollerImage: "ghcr.io/ibm-blockchain/enroller", + EnrollerTag: "1.0.0-20210826-amd64", + }, + Domain: domain, + Secret: ¤t.SecretSpec{ + Enrollment: ¤t.EnrollmentSpec{ + Component: ¤t.Enrollment{ + CAHost: caHost, + CAPort: "443", + CAName: "ca", + CATLS: ¤t.CATLS{ + CACert: base64.StdEncoding.EncodeToString(tlsCert), + }, + EnrollID: peerUsername, + EnrollSecret: "peerpw", + }, + TLS: ¤t.Enrollment{ + CAHost: caHost, + CAPort: "443", + CAName: "tlsca", + CATLS: ¤t.CATLS{ + CACert: base64.StdEncoding.EncodeToString(tlsCert), + }, + EnrollID: peerUsername, + EnrollSecret: "peerpw", + }, + }, + }, + FabricVersion: "1.4.12", + }, + } + + return &helper.Peer{ + Domain: domain, + Name: cr.Name, + Namespace: namespace, + CR: cr, + CRClient: ibpCRClient, + KClient: kclient, + NativeResourcePoller: integration.NativeResourcePoller{ + Name: cr.Name, + Namespace: namespace, + Client: kclient, + }, + } +} diff --git a/integration/migration/migration_suite_test.go b/integration/migration/migration_suite_test.go new file mode 100644 index 00000000..5306ce39 --- /dev/null +++ b/integration/migration/migration_suite_test.go @@ -0,0 +1,125 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package migration_test + +import ( + "context" + "fmt" + "os" + "testing" + + apis "github.com/IBM-Blockchain/fabric-operator/api" + "github.com/IBM-Blockchain/fabric-operator/pkg/global" + "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/client/config" + "sigs.k8s.io/controller-runtime/pkg/manager" +) + +func TestMigration(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Migration Suite") +} + +var ( + kclient *kubernetes.Clientset + client controllerclient.Client + scheme *runtime.Scheme + namespace string + mgr manager.Manager + killchan context.Context +) + +var _ = BeforeSuite(func() { + var err error + cfg, err := config.GetConfig() + Expect(err).NotTo(HaveOccurred()) + + namespace = os.Getenv("OPERATOR_NAMESPACE") + if namespace == "" { + namespace = "operator-test" + } + namespace = fmt.Sprintf("%s-migration", namespace) + + mgr, err = manager.New(cfg, manager.Options{ + Namespace: namespace, + MetricsBindAddress: "0", + }) + Expect(err).NotTo(HaveOccurred()) + + err = apis.AddToScheme(mgr.GetScheme()) + Expect(err).NotTo(HaveOccurred()) + + killchan = context.TODO() + go mgr.Start(killchan) + + client = controllerclient.New(mgr.GetClient(), &global.ConfigSetter{}) + scheme = mgr.GetScheme() + + kclient, err = kubernetes.NewForConfig(cfg) + Expect(err).NotTo(HaveOccurred()) + + cleanup() + + ns := &corev1.Namespace{} + ns.Name = namespace + err = client.Create(context.TODO(), ns) + Expect(err).NotTo(HaveOccurred()) +}) + +var _ = AfterSuite(func() { + err := cleanup() + Expect(err).NotTo(HaveOccurred()) + + killchan.Done() +}) + +func cleanup() error { + ns := &corev1.Namespace{} + ns.Name = namespace + + err := client.Delete(context.TODO(), ns) + if err != nil { + return err + } + + opts := metav1.ListOptions{} + watchNamespace, err := kclient.CoreV1().Namespaces().Watch(context.TODO(), opts) + if err != nil { + return err + } + + for { + resultChan := <-watchNamespace.ResultChan() + if resultChan.Type == watch.Deleted { + ns := resultChan.Object.(*corev1.Namespace) + if ns.Name == namespace { + break + } + } + } + + return nil +} diff --git a/integration/migration/migration_test.go b/integration/migration/migration_test.go new file mode 100644 index 00000000..06b3e2de --- /dev/null +++ b/integration/migration/migration_test.go @@ -0,0 +1,1003 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package migration_test + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "math/rand" + "path/filepath" + "strings" + "time" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/integration" + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + cainit "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/mocks" + ordererinit "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer" + peerinit "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer" + allmigrator "github.com/IBM-Blockchain/fabric-operator/pkg/migrator" + "github.com/IBM-Blockchain/fabric-operator/pkg/migrator/initsecret" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering" + "github.com/IBM-Blockchain/fabric-operator/version" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func GetLabels(instance v1.Object) map[string]string { + return map[string]string{ + "app": "peermigration", + } +} + +func RandomNodePort() int32 { + rand.Seed(time.Now().UnixNano()) + min := 30000 + max := 32767 + return int32(rand.Intn(max-min+1) + min) +} + +// TODO api versioning/migration logic will be updated +var _ = PDescribe("migrating", func() { + Context("ca", func() { + var ( + migrator *allmigrator.Migrator + instance *current.IBPCA + httpNodePort int32 + operationNodePort int32 + ) + + BeforeEach(func() { + logf.SetLogger(zap.New()) + + defaultConfigs := "../../defaultconfig" + of, err := offering.GetType("K8S") + Expect(err).To(BeNil()) + + operatorCfg := &config.Config{ + CAInitConfig: &cainit.Config{ + CADefaultConfigPath: filepath.Join(defaultConfigs, "ca/ca.yaml"), + TLSCADefaultConfigPath: filepath.Join(defaultConfigs, "ca/tlsca.yaml"), + SharedPath: "/shared", + }, + PeerInitConfig: &peerinit.Config{ + OUFile: filepath.Join(defaultConfigs, "peer/ouconfig.yaml"), + }, + OrdererInitConfig: &ordererinit.Config{ + OrdererFile: filepath.Join(defaultConfigs, "orderer/orderer.yaml"), + ConfigTxFile: filepath.Join(defaultConfigs, "orderer/configtx.yaml"), + OUFile: filepath.Join(defaultConfigs, "orderer/ouconfig.yaml"), + }, + Offering: of, + } + + migrator = allmigrator.New(mgr, operatorCfg, namespace) + + consoleinstance := &current.IBPConsole{ + ObjectMeta: metav1.ObjectMeta{ + Name: "consolemigration0", + Namespace: namespace, + }, + Spec: current.IBPConsoleSpec{ + NetworkInfo: &current.NetworkInfo{ + Domain: "domain", + }, + }, + Status: current.IBPConsoleStatus{ + CRStatus: current.CRStatus{ + Status: current.True, + Version: version.V213, + }, + }, + } + err = client.Create(context.TODO(), consoleinstance) + Expect(err).NotTo(HaveOccurred()) + + err = 
client.UpdateStatus(context.TODO(), consoleinstance) + Expect(err).NotTo(HaveOccurred()) + + instance = ¤t.IBPCA{ + ObjectMeta: metav1.ObjectMeta{ + Name: "camigration", + Namespace: namespace, + }, + Spec: current.IBPCASpec{ + FabricVersion: integration.FabricCAVersion, + }, + Status: current.IBPCAStatus{ + CRStatus: current.CRStatus{ + Status: current.True, + }, + }, + } + err = client.Create(context.TODO(), instance) + Expect(err).NotTo(HaveOccurred()) + err = client.UpdateStatus(context.TODO(), instance) + Expect(err).NotTo(HaveOccurred()) + + operationNodePort = RandomNodePort() + httpNodePort = RandomNodePort() + service := &corev1.Service{ + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + Ports: []corev1.ServicePort{ + corev1.ServicePort{ + Name: "http", + Port: int32(7054), + NodePort: httpNodePort, + }, + corev1.ServicePort{ + Name: "operations", + Port: int32(9443), + NodePort: operationNodePort, + }, + }, + }, + } + service.Name = "camigration-service" + service.Namespace = namespace + + httpNodePort, operationNodePort = CreateServiceWithRetry(service, 3) + pathType := networkingv1.PathTypeImplementationSpecific + ingress := &networkingv1.Ingress{ + Spec: networkingv1.IngressSpec{ + Rules: []networkingv1.IngressRule{ + networkingv1.IngressRule{ + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ + networkingv1.HTTPIngressPath{ + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "camigration-service", + Port: networkingv1.ServiceBackendPort{ + Number: 443, + }, + }, + }, + Path: "/", + PathType: &pathType, + }, + }, + }, + }, + }, + }, + }, + } + ingress.Name = "camigration" + ingress.Namespace = namespace + + err = client.Create(context.TODO(), ingress) + Expect(err).NotTo(HaveOccurred()) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-ca", instance.Name), + Namespace: instance.Namespace, + }, + } + err = client.Create(context.TODO(), cm) + Expect(err).NotTo(HaveOccurred()) + + cm = &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-overrides", instance.Name), + Namespace: instance.Namespace, + }, + } + err = client.Create(context.TODO(), cm) + Expect(err).NotTo(HaveOccurred()) + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-ca", instance.Name), + Namespace: instance.Namespace, + }, + } + err = client.Create(context.TODO(), secret) + Expect(err).NotTo(HaveOccurred()) + + secret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-tlsca", instance.Name), + Namespace: instance.Namespace, + }, + } + err = client.Create(context.TODO(), secret) + Expect(err).NotTo(HaveOccurred()) + }) + + It("migrates ca resources", func() { + err := migrator.Migrate() + Expect(err).NotTo(HaveOccurred()) + + By("creating a secret with state of current resources before migration", func() { + var secret *corev1.Secret + var err error + + Eventually(func() bool { + secret, err = kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "camigration-oldstate", metav1.GetOptions{}) + if err != nil { + return false + } + return true + }).Should(Equal(true)) + + Expect(secret.Data["camigration-service"]).NotTo(Equal("")) + Expect(secret.Data["camigration-cm-ca"]).NotTo(Equal("")) + Expect(secret.Data["camigration-cm-overrides"]).NotTo(Equal("")) + Expect(secret.Data["camigration-secret-ca"]).NotTo(Equal("")) + 
Expect(secret.Data["camigration-secret-tlsca"]).NotTo(Equal("")) + }) + + By("creating a new service with no 'service' in name and same nodeport", func() { + var service *corev1.Service + var err error + + Eventually(func() bool { + service, err = kclient.CoreV1().Services(namespace).Get(context.TODO(), "camigration", metav1.GetOptions{}) + if err != nil { + return false + } + return true + }).Should(Equal(true)) + + Expect(service.Spec.Ports[0].NodePort).To(Equal(httpNodePort)) + Expect(service.Spec.Ports[1].NodePort).To(Equal(operationNodePort)) + + _, err = kclient.CoreV1().Services(namespace).Get(context.TODO(), "camigration-service", metav1.GetOptions{}) + Expect(err).To(HaveOccurred()) + }) + + By("creating a new ingress with no dashes and same servicename", func() { + var ingress *networkingv1.Ingress + var err error + + Eventually(func() bool { + ingress, err = kclient.NetworkingV1().Ingresses(namespace).Get(context.TODO(), "camigration", metav1.GetOptions{}) + if err != nil { + return false + } + return true + }).Should(Equal(true)) + + Expect(ingress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name).To(Equal("camigration")) + }) + }) + }) + + Context("console", func() { + var ( + migrator *allmigrator.Migrator + instance *current.IBPConsole + httpNodePort int32 + operationNodePort int32 + ) + + BeforeEach(func() { + logf.SetLogger(zap.New()) + + defaultConfigs := "../../defaultconfig" + of, err := offering.GetType("K8S") + + operatorCfg := &config.Config{ + CAInitConfig: &cainit.Config{ + CADefaultConfigPath: filepath.Join(defaultConfigs, "ca/ca.yaml"), + TLSCADefaultConfigPath: filepath.Join(defaultConfigs, "ca/tlsca.yaml"), + SharedPath: "/shared", + }, + PeerInitConfig: &peerinit.Config{ + OUFile: filepath.Join(defaultConfigs, "peer/ouconfig.yaml"), + }, + OrdererInitConfig: &ordererinit.Config{ + OrdererFile: filepath.Join(defaultConfigs, "orderer/orderer.yaml"), + ConfigTxFile: filepath.Join(defaultConfigs, "orderer/configtx.yaml"), + OUFile: filepath.Join(defaultConfigs, "orderer/ouconfig.yaml"), + }, + Offering: of, + } + + migrator = allmigrator.New(mgr, operatorCfg, namespace) + + instance = ¤t.IBPConsole{ + ObjectMeta: metav1.ObjectMeta{ + Name: "consolemigration", + Namespace: namespace, + }, + Spec: current.IBPConsoleSpec{ + NetworkInfo: ¤t.NetworkInfo{}, + }, + Status: current.IBPConsoleStatus{ + CRStatus: current.CRStatus{ + Status: current.True, + }, + }, + } + err = client.Create(context.TODO(), instance) + Expect(err).NotTo(HaveOccurred()) + err = client.UpdateStatus(context.TODO(), instance) + Expect(err).NotTo(HaveOccurred()) + + operationNodePort = RandomNodePort() + httpNodePort = RandomNodePort() + service := &corev1.Service{ + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + Ports: []corev1.ServicePort{ + corev1.ServicePort{ + Name: "http", + Port: int32(7054), + NodePort: httpNodePort, + }, + corev1.ServicePort{ + Name: "operations", + Port: int32(9443), + NodePort: operationNodePort, + }, + }, + }, + } + service.Name = "consolemigration-service" + service.Namespace = namespace + + httpNodePort, operationNodePort = CreateServiceWithRetry(service, 3) + + ingress := &networkingv1.Ingress{ + Spec: networkingv1.IngressSpec{ + Rules: []networkingv1.IngressRule{ + networkingv1.IngressRule{ + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ + networkingv1.HTTPIngressPath{ + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + 
Name: "consolemigration-service", + Port: networkingv1.ServiceBackendPort{ + Number: 443, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + ingress.Name = "consolemigration" + ingress.Namespace = namespace + + err = client.Create(context.TODO(), ingress) + Expect(err).NotTo(HaveOccurred()) + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-console-pw", instance.Name), + Namespace: instance.Namespace, + }, + } + err = client.Create(context.TODO(), secret) + Expect(err).NotTo(HaveOccurred()) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-configmap", instance.Name), + Namespace: instance.Namespace, + }, + } + err = client.Create(context.TODO(), cm) + Expect(err).NotTo(HaveOccurred()) + + cm = &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-deployer-template", instance.Name), + Namespace: instance.Namespace, + }, + } + err = client.Create(context.TODO(), cm) + Expect(err).NotTo(HaveOccurred()) + + cm = &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-template-configmap", instance.Name), + Namespace: instance.Namespace, + }, + } + err = client.Create(context.TODO(), cm) + Expect(err).NotTo(HaveOccurred()) + + n := types.NamespacedName{ + Name: cm.GetName(), + Namespace: cm.GetNamespace(), + } + + err = wait.Poll(500*time.Millisecond, 30*time.Second, func() (bool, error) { + err := client.Get(context.TODO(), n, cm) + if err == nil { + return true, nil + } + return false, nil + }) + Expect(err).NotTo(HaveOccurred()) + + }) + + It("migrates console resources", func() { + err := migrator.Migrate() + Expect(err).NotTo(HaveOccurred()) + + Eventually(func() bool { + _, err := kclient.CoreV1().Services(namespace).Get(context.TODO(), "consolemigration-service", metav1.GetOptions{}) + if err != nil { + return false + } + return true + }).Should(Equal(false)) + + By("creating a secret with state of current resources before migration", func() { + var secret *corev1.Secret + var err error + + Eventually(func() bool { + secret, err = kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "consolemigration-oldstate", metav1.GetOptions{}) + if err != nil { + return false + } + return true + }).Should(Equal(true)) + + Expect(secret.Data["consolemigration-service"]).NotTo(Equal("")) + Expect(secret.Data["consolemigration-cm"]).NotTo(Equal("")) + Expect(secret.Data["consolemigration-cm-deployer"]).NotTo(Equal("")) + Expect(secret.Data["consolemigration-cm-template"]).NotTo(Equal("")) + Expect(secret.Data["consolemigration-secret-pw"]).NotTo(Equal("")) + }) + + By("creating a new service with 'service' and same nodeport", func() { + var service *corev1.Service + var err error + + Eventually(func() bool { + service, err = kclient.CoreV1().Services(namespace).Get(context.TODO(), "consolemigration", metav1.GetOptions{}) + if err != nil { + return false + } + return true + }).Should(Equal(true)) + + Expect(service.Spec.Ports[0].NodePort).To(Equal(httpNodePort)) + Expect(service.Spec.Ports[1].NodePort).To(Equal(operationNodePort)) + }) + + By("creating a new ingress with no dashes and same servicename", func() { + var ingress *networkingv1.Ingress + var err error + + Eventually(func() bool { + ingress, err = kclient.NetworkingV1().Ingresses(namespace).Get(context.TODO(), "consolemigration", metav1.GetOptions{}) + if err != nil { + return false + } + return true + }).Should(Equal(true)) + + Expect(ingress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name).To(Equal("consolemigration")) + 
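+
+				// Taken together, these assertions capture the renaming convention the
+				// migration follows as exercised here: the old "<name>-service" Service and
+				// the "-service"-suffixed Ingress backend are replaced by resources named
+				// after the CR itself, while the original NodePorts are preserved.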
}) + }) + }) + + Context("peer", func() { + var ( + migrator *allmigrator.Migrator + instance *current.IBPPeer + mspSecret *initsecret.Secret + peerApiNodePort int32 + operationNodePort int32 + grpcwebDebugNodePort int32 + grpcwebNodePort int32 + ) + + BeforeEach(func() { + logf.SetLogger(zap.New()) + mockValidator := &mocks.CryptoValidator{} + mockValidator.CheckEcertCryptoReturns(errors.New("not found")) + + defaultConfigs := "../../defaultconfig" + of, err := offering.GetType("K8S") + + operatorCfg := &config.Config{ + CAInitConfig: &cainit.Config{ + CADefaultConfigPath: filepath.Join(defaultConfigs, "ca/ca.yaml"), + TLSCADefaultConfigPath: filepath.Join(defaultConfigs, "ca/tlsca.yaml"), + SharedPath: "/shared", + }, + PeerInitConfig: &peerinit.Config{ + CorePeerFile: filepath.Join(defaultConfigs, "peer/core.yaml"), + OUFile: filepath.Join(defaultConfigs, "peer/ouconfig.yaml"), + }, + OrdererInitConfig: &ordererinit.Config{ + OrdererFile: filepath.Join(defaultConfigs, "orderer/orderer.yaml"), + ConfigTxFile: filepath.Join(defaultConfigs, "orderer/configtx.yaml"), + OUFile: filepath.Join(defaultConfigs, "orderer/ouconfig.yaml"), + }, + Offering: of, + } + + migrator = allmigrator.New(mgr, operatorCfg, namespace) + + instance = &current.IBPPeer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "peermigration", + Namespace: namespace, + }, + Spec: current.IBPPeerSpec{ + Domain: "127.0.0.1", + ImagePullSecrets: []string{"pullSecret"}, + Images: &current.PeerImages{ + CouchDBImage: integration.CouchdbImage, + CouchDBTag: integration.CouchdbTag, + }, + }, + Status: current.IBPPeerStatus{ + CRStatus: current.CRStatus{ + Status: current.True, + }, + }, + } + err = client.Create(context.TODO(), instance) + Expect(err).NotTo(HaveOccurred()) + err = client.UpdateStatus(context.TODO(), instance) + Expect(err).NotTo(HaveOccurred()) + + peerApiNodePort = RandomNodePort() + operationNodePort = RandomNodePort() + grpcwebDebugNodePort = RandomNodePort() + grpcwebNodePort = RandomNodePort() + service := &corev1.Service{ + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + Ports: []corev1.ServicePort{ + corev1.ServicePort{ + Name: "peer-api", + Port: int32(7051), + NodePort: peerApiNodePort, + }, + corev1.ServicePort{ + Name: "operations", + Port: int32(9443), + NodePort: operationNodePort, + }, + corev1.ServicePort{ + Name: "grpcweb-debug", + Port: int32(8080), + NodePort: grpcwebDebugNodePort, + }, + corev1.ServicePort{ + Name: "grpcweb", + Port: int32(7443), + NodePort: grpcwebNodePort, + }, + }, + }, + } + service.Name = "peermigration-service" + service.Namespace = namespace + + peerApiNodePort, operationNodePort, grpcwebDebugNodePort, grpcwebNodePort = CreatePeerServiceWithRetry(service, 3) + + ingress := &networkingv1.Ingress{ + Spec: networkingv1.IngressSpec{ + Rules: []networkingv1.IngressRule{ + networkingv1.IngressRule{ + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ + networkingv1.HTTPIngressPath{ + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "peermigration-service", + Port: networkingv1.ServiceBackendPort{ + Number: 443, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + ingress.Name = "peermigration" + ingress.Namespace = namespace + + err = client.Create(context.TODO(), ingress) + Expect(err).NotTo(HaveOccurred()) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-fluentd-configmap", instance.Name), + Namespace: 
instance.Namespace, + }, + } + err = client.Create(context.TODO(), cm) + Expect(err).NotTo(HaveOccurred()) + + secretBytes, err := ioutil.ReadFile("../../testdata/migration/secret.json") + Expect(err).NotTo(HaveOccurred()) + + secret := &corev1.Secret{ + Data: map[string][]byte{"secret.json": secretBytes}, + } + secret.Name = "peermigration-msp-secret" + secret.Namespace = namespace + + err = client.Create(context.TODO(), secret) + Expect(err).NotTo(HaveOccurred()) + + Eventually(func() bool { + namespacedName := types.NamespacedName{ + Name: secret.Name, + Namespace: namespace, + } + + secret := &corev1.Secret{} + err := client.Get(context.TODO(), namespacedName, secret) + if err != nil { + return false + } + return true + }).Should(Equal(true)) + + mspSecret = &initsecret.Secret{} + err = json.Unmarshal(secretBytes, mspSecret) + Expect(err).NotTo(HaveOccurred()) + }) + + It("migrates old MSP secret to new secrets", func() { + err := migrator.Migrate() + Expect(err).NotTo(HaveOccurred()) + + By("creating a secret with state of current resources before migration", func() { + var secret *corev1.Secret + var err error + + Eventually(func() bool { + secret, err = kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "peermigration-oldstate", metav1.GetOptions{}) + if err != nil { + return false + } + return true + }).Should(Equal(true)) + + Expect(secret.Data["peermigration-service"]).NotTo(Equal("")) + Expect(secret.Data["peermigration-cm-fluentd"]).NotTo(Equal("")) + }) + + By("creating ecert ca certs secret", func() { + Eventually(func() bool { + _, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "ecert-peermigration-cacerts", metav1.GetOptions{}) + if err != nil { + return false + } + return true + }).Should(Equal(true)) + }) + + By("creating ecert keystore secret", func() { + Eventually(func() bool { + _, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "ecert-peermigration-keystore", metav1.GetOptions{}) + if err != nil { + return false + } + return true + }).Should(Equal(true)) + }) + + By("creating ecert signcert secret", func() { + Eventually(func() bool { + _, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "ecert-peermigration-signcert", metav1.GetOptions{}) + if err != nil { + return false + } + return true + }).Should(Equal(true)) + }) + + By("creating ecert admin cert secret", func() { + Eventually(func() bool { + _, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "ecert-peermigration-admincerts", metav1.GetOptions{}) + if err != nil { + return false + } + return true + }).Should(Equal(true)) + }) + + By("creating tls ca certs secret", func() { + Eventually(func() bool { + _, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "tls-peermigration-cacerts", metav1.GetOptions{}) + if err != nil { + return false + } + return true + }).Should(Equal(true)) + }) + + By("creating tls keystore certs secret", func() { + Eventually(func() bool { + _, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "tls-peermigration-keystore", metav1.GetOptions{}) + if err != nil { + return false + } + return true + }).Should(Equal(true)) + }) + + By("creating tls signcert secret", func() { + Eventually(func() bool { + _, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "tls-peermigration-signcert", metav1.GetOptions{}) + if err != nil { + return false + } + return true + }).Should(Equal(true)) + }) + + By("creating a new service with no 'service' and same nodeport", func() { + var service *corev1.Service + 
var err error + + Eventually(func() bool { + service, err = kclient.CoreV1().Services(namespace).Get(context.TODO(), "peermigration", metav1.GetOptions{}) + if err != nil { + return false + } + return true + }).Should(Equal(true)) + + Expect(service.Spec.Ports[0].NodePort).To(Equal(peerApiNodePort)) + Expect(service.Spec.Ports[1].NodePort).To(Equal(operationNodePort)) + Expect(service.Spec.Ports[2].NodePort).To(Equal(grpcwebDebugNodePort)) + Expect(service.Spec.Ports[3].NodePort).To(Equal(grpcwebNodePort)) + + _, err = kclient.CoreV1().Services(namespace).Get(context.TODO(), "peermigration-service", metav1.GetOptions{}) + Expect(err).To(HaveOccurred()) + }) + + By("creating a new ingress with no dashes and same servicename", func() { + var ingress *networkingv1.Ingress + var err error + + Eventually(func() bool { + ingress, err = kclient.NetworkingV1().Ingresses(namespace).Get(context.TODO(), "peermigration", metav1.GetOptions{}) + if err != nil { + return false + } + return true + }).Should(Equal(true)) + + Expect(ingress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name).To(Equal("peermigration")) + }) + }) + }) + + Context("orderer", func() { + var ( + migrator *allmigrator.Migrator + instance *current.IBPOrderer + mspSecret *initsecret.Secret + ) + + BeforeEach(func() { + logf.SetLogger(zap.New()) + + defaultConfigs := "../../defaultconfig" + of, err := offering.GetType("K8S") + + operatorCfg := &config.Config{ + CAInitConfig: &cainit.Config{ + CADefaultConfigPath: filepath.Join(defaultConfigs, "ca/ca.yaml"), + TLSCADefaultConfigPath: filepath.Join(defaultConfigs, "ca/tlsca.yaml"), + SharedPath: "/shared", + }, + PeerInitConfig: &peerinit.Config{ + OUFile: filepath.Join(defaultConfigs, "peer/ouconfig.yaml"), + }, + OrdererInitConfig: &ordererinit.Config{ + OrdererFile: filepath.Join(defaultConfigs, "orderer/orderer.yaml"), + ConfigTxFile: filepath.Join(defaultConfigs, "orderer/configtx.yaml"), + OUFile: filepath.Join(defaultConfigs, "orderer/ouconfig.yaml"), + }, + Offering: of, + } + + mockValidator := &mocks.CryptoValidator{} + mockValidator.CheckEcertCryptoReturns(errors.New("not found")) + + migrator = allmigrator.New(mgr, operatorCfg, namespace) + + instance = &current.IBPOrderer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "orderer-migration", + Namespace: namespace, + }, + Spec: current.IBPOrdererSpec{ + Domain: "orderer.url", + }, + Status: current.IBPOrdererStatus{ + CRStatus: current.CRStatus{ + Status: current.True, + }, + }, + } + err = client.Create(context.TODO(), instance) + Expect(err).NotTo(HaveOccurred()) + err = client.UpdateStatus(context.TODO(), instance) + Expect(err).NotTo(HaveOccurred()) + + secretBytes, err := ioutil.ReadFile("../../testdata/migration/secret.json") + Expect(err).NotTo(HaveOccurred()) + + secret := &corev1.Secret{ + Data: map[string][]byte{"secret.json": secretBytes}, + } + secret.Name = "orderer-migration-secret" + secret.Namespace = namespace + + err = client.Create(context.TODO(), secret) + Expect(err).NotTo(HaveOccurred()) + + Eventually(func() bool { + namespacedName := types.NamespacedName{ + Name: secret.Name, + Namespace: namespace, + } + + secret := &corev1.Secret{} + err := client.Get(context.TODO(), namespacedName, secret) + if err != nil { + return false + } + return true + }).Should(Equal(true)) + + mspSecret = &initsecret.Secret{} + err = json.Unmarshal(secretBytes, mspSecret) + Expect(err).NotTo(HaveOccurred()) + + configmap := &corev1.ConfigMap{} + configmap.Name = fmt.Sprintf("%s-env-configmap", instance.GetName()) + configmap.Namespace = 
namespace + + err = client.Create(context.TODO(), configmap) + Expect(err).NotTo(HaveOccurred()) + + ingress := &networkingv1.Ingress{ + Spec: networkingv1.IngressSpec{ + Rules: []networkingv1.IngressRule{ + networkingv1.IngressRule{ + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ + networkingv1.HTTPIngressPath{ + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "camigration-service", + Port: networkingv1.ServiceBackendPort{ + Number: 443, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + ingress.Name = "orderer-migration" + ingress.Namespace = namespace + + err = client.Create(context.TODO(), ingress) + Expect(err).NotTo(HaveOccurred()) + + n := types.NamespacedName{ + Name: ingress.GetName(), + Namespace: ingress.GetNamespace(), + } + + err = wait.Poll(500*time.Millisecond, 30*time.Second, func() (bool, error) { + err := client.Get(context.TODO(), n, ingress) + if err == nil { + return true, nil + } + return false, nil + }) + Expect(err).NotTo(HaveOccurred()) + + }) + + It("generates the configmap", func() { + err := migrator.Migrate() + Expect(err).NotTo(HaveOccurred()) + + Eventually(func() bool { + _, err := kclient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), "orderer-migrationnode1-config", metav1.GetOptions{}) + if err != nil { + return true + } + return false + }).Should(Equal(true)) + }) + }) +}) + +func CreateServiceWithRetry(service *corev1.Service, retryNumber int) (int32, int32) { + err := client.Create(context.TODO(), service) + if err != nil { + if retryNumber == 0 { + Expect(err).NotTo(HaveOccurred()) + } + if strings.Contains(err.Error(), "provided port is already allocated") { + fmt.Fprintf(GinkgoWriter, "encountered port error: %s, trying again\n", err) + for i, _ := range service.Spec.Ports { + service.Spec.Ports[i].NodePort = RandomNodePort() + } + CreateServiceWithRetry(service, retryNumber-1) + } + } + + return service.Spec.Ports[0].NodePort, service.Spec.Ports[1].NodePort +} + +func CreatePeerServiceWithRetry(service *corev1.Service, retryNumber int) (int32, int32, int32, int32) { + err := client.Create(context.TODO(), service) + if err != nil { + if retryNumber == 0 { + Expect(err).NotTo(HaveOccurred()) + } + if strings.Contains(err.Error(), "provided port is already allocated") { + fmt.Fprintf(GinkgoWriter, "encountered port error: %s, trying again\n", err) + for i, _ := range service.Spec.Ports { + service.Spec.Ports[i].NodePort = RandomNodePort() + } + CreatePeerServiceWithRetry(service, retryNumber-1) + } + } + + return service.Spec.Ports[0].NodePort, service.Spec.Ports[1].NodePort, service.Spec.Ports[2].NodePort, service.Spec.Ports[3].NodePort +} diff --git a/integration/nativeresourcepoller.go b/integration/nativeresourcepoller.go new file mode 100644 index 00000000..6d480676 --- /dev/null +++ b/integration/nativeresourcepoller.go @@ -0,0 +1,375 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package integration + +import ( + "context" + "fmt" + "strings" + + . "github.com/onsi/ginkgo" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" +) + +type NativeResourcePoller struct { + Name string + Namespace string + Client *kubernetes.Clientset + retry int +} + +func (p *NativeResourcePoller) PVCExists() bool { + opts := metav1.ListOptions{ + LabelSelector: fmt.Sprintf("app=%s", p.Name), + } + pvcList, err := p.Client.CoreV1().PersistentVolumeClaims(p.Namespace).List(context.TODO(), opts) + if err != nil { + return false + } + for _, pvc := range pvcList.Items { + if strings.HasPrefix(pvc.Name, p.Name) { + return true + } + } + + return false +} + +func (p *NativeResourcePoller) IngressExists() bool { + opts := metav1.ListOptions{ + LabelSelector: fmt.Sprintf("app=%s", p.Name), + } + ingressList, err := p.Client.NetworkingV1().Ingresses(p.Namespace).List(context.TODO(), opts) + if err != nil { + return false + } + for _, ingress := range ingressList.Items { + if strings.HasPrefix(ingress.Name, p.Name) { + return true + } + } + + return false +} + +func (p *NativeResourcePoller) ServiceExists() bool { + opts := metav1.ListOptions{ + LabelSelector: fmt.Sprintf("app=%s", p.Name), + } + serviceList, err := p.Client.CoreV1().Services(p.Namespace).List(context.TODO(), opts) + if err != nil { + return false + } + for _, service := range serviceList.Items { + if strings.HasPrefix(service.Name, p.Name) { + return true + } + } + + return false +} + +func (p *NativeResourcePoller) ConfigMapExists() bool { + opts := metav1.ListOptions{ + LabelSelector: fmt.Sprintf("app=%s", p.Name), + } + cmList, err := p.Client.CoreV1().ConfigMaps(p.Namespace).List(context.TODO(), opts) + if err != nil { + return false + } + for _, cm := range cmList.Items { + if strings.HasPrefix(cm.Name, p.Name) { + return true + } + } + + return false +} + +func (p *NativeResourcePoller) DeploymentExists() bool { + dep, err := p.Client.AppsV1().Deployments(p.Namespace).Get(context.TODO(), p.Name, metav1.GetOptions{}) + if err == nil && dep != nil { + return true + } + + return false +} + +func (p *NativeResourcePoller) Deployment() *appsv1.Deployment { + deps := p.DeploymentList() + if len(deps.Items) > 0 { + return &deps.Items[0] + } + return nil +} + +func (p *NativeResourcePoller) DeploymentList() *appsv1.DeploymentList { + opts := metav1.ListOptions{ + LabelSelector: fmt.Sprintf("app=%s", p.Name), + } + deps, err := p.Client.AppsV1().Deployments(p.Namespace).List(context.TODO(), opts) + if err != nil { + return &appsv1.DeploymentList{} + } + return deps +} + +func (p *NativeResourcePoller) NumberOfDeployments() int { + opts := metav1.ListOptions{ + LabelSelector: fmt.Sprintf("app=%s", p.Name), + } + deps, err := p.Client.AppsV1().Deployments(p.Namespace).List(context.TODO(), opts) + if err != nil { + return 0 + } + + return len(deps.Items) +} + +func (p *NativeResourcePoller) NumberOfOrdererNodeDeployments() int { + opts := metav1.ListOptions{ + LabelSelector: fmt.Sprintf("parent=%s", p.Name), + } + + deps, err := p.Client.AppsV1().Deployments(p.Namespace).List(context.TODO(), opts) + if err != nil { + return 0 + } + + return len(deps.Items) +} + +func (p *NativeResourcePoller) IsRunning() bool { + opts := metav1.ListOptions{ + LabelSelector: fmt.Sprintf("name=%s", p.Name), + } + podList, err := 
p.Client.CoreV1().Pods(p.Namespace).List(context.TODO(), opts) + if err != nil { + return false + } + for _, pod := range podList.Items { + if strings.HasPrefix(pod.Name, p.Name) { + if pod.Status.Phase == corev1.PodRunning { + containerStatuses := pod.Status.ContainerStatuses + for _, status := range containerStatuses { + if status.State.Running == nil { + return false + } + if !status.Ready { + return false + } + } + return true + } else if pod.Status.Phase == corev1.PodPending { + if p.retry == 0 { + if len(pod.Status.InitContainerStatuses) == 0 { + return false + } + initContainerStatuses := pod.Status.InitContainerStatuses + for _, status := range initContainerStatuses { + if status.State.Waiting != nil { + if status.State.Waiting.Reason == "CreateContainerConfigError" { + // Handling this error will make no difference + _ = p.Client.CoreV1().Pods(p.Namespace).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}) + p.retry = 1 + } + } + } + } + } + } + } + + return false +} + +// PodCreated returns true if pod has been created based on app name +func (p *NativeResourcePoller) PodCreated() bool { + opts := metav1.ListOptions{ + LabelSelector: fmt.Sprintf("app=%s", p.Name), + } + podList, err := p.Client.CoreV1().Pods(p.Namespace).List(context.TODO(), opts) + if err != nil { + return false + } + if len(podList.Items) != 0 { + return true + } + return false +} + +func (p *NativeResourcePoller) PodIsRunning() bool { + opts := metav1.ListOptions{ + LabelSelector: fmt.Sprintf("app=%s", p.Name), + } + podList, err := p.Client.CoreV1().Pods(p.Namespace).List(context.TODO(), opts) + if err != nil { + return false + } + for _, pod := range podList.Items { + if strings.HasPrefix(pod.Name, p.Name) { + switch pod.Status.Phase { + case corev1.PodRunning: + containerStatuses := pod.Status.ContainerStatuses + for _, status := range containerStatuses { + if status.State.Running == nil { + fmt.Fprintf(GinkgoWriter, "For pod '%s', container '%s' is not yet running\n", pod.Name, status.Name) + return false + } + if !status.Ready { + fmt.Fprintf(GinkgoWriter, "For pod '%s', container '%s' is not yet ready\n", pod.Name, status.Name) + return false + } + } + fmt.Fprintf(GinkgoWriter, "'%s' and its containers are ready and running\n", pod.Name) + return true + case corev1.PodPending: + p.CheckForStuckPod(pod) + } + } + } + + return false +} + +func (p *NativeResourcePoller) CheckForStuckPod(pod corev1.Pod) bool { + fmt.Fprintf(GinkgoWriter, "'%s' in pending state, waiting for pod to start running...\n", pod.Name) + if p.retry > 0 { + return false // Out of retries, return + } + + if len(pod.Status.InitContainerStatuses) == 0 { + return false // No containers found, unable to get status to determine if pod is running + } + + initContainerStatuses := pod.Status.InitContainerStatuses + for _, status := range initContainerStatuses { + if status.State.Waiting != nil { + fmt.Fprintf(GinkgoWriter, "'%s' is waiting, with reason '%s'\n", pod.Name, status.State.Waiting.Reason) + + // Intermittent issues are seen on pods with shared volume mounts that are deleted and created in + // quick succession; in such situations the pod sometimes ends up with an error stating that it + // can't mount to subPath. This can be resolved by deleting the pod and letting it try again to + // acquire the volume mount. The code below mimics this solution by deleting the pod, which is + // brought back by the deployment and the pod comes up fine. 
This is more of a hack to resolve this + // issue in test, the root cause might live in portworx or in operator. + if status.State.Waiting.Reason == "CreateContainerConfigError" { + fmt.Fprintf(GinkgoWriter, "Deleting pod '%s'\n", pod.Name) + err := p.Client.CoreV1().Pods(p.Namespace).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}) + if err != nil { + fmt.Fprintf(GinkgoWriter, "Deleting pod '%s' failed: %s\n", pod.Name, err) + } + p.retry = 1 + } + } + } + + return true +} + +func (p *NativeResourcePoller) GetPods() []corev1.Pod { + opts := metav1.ListOptions{ + LabelSelector: fmt.Sprintf("app=%s", p.Name), + } + podList, err := p.Client.CoreV1().Pods(p.Namespace).List(context.TODO(), opts) + if err != nil { + return nil + } + return podList.Items +} + +func (p *NativeResourcePoller) GetRunningPods() []corev1.Pod { + opts := metav1.ListOptions{ + LabelSelector: fmt.Sprintf("app=%s", p.Name), + } + podList, err := p.Client.CoreV1().Pods(p.Namespace).List(context.TODO(), opts) + if err != nil { + return nil + } + pods := []corev1.Pod{} + for _, pod := range podList.Items { + switch pod.Status.Phase { + case corev1.PodRunning: + containerStatuses := pod.Status.ContainerStatuses + + readyContainers := 0 + numOfContainers := len(containerStatuses) + + for _, status := range containerStatuses { + if status.Ready && status.State.Running != nil { + readyContainers++ + } + } + if readyContainers == numOfContainers { + pods = append(pods, pod) + } + + case corev1.PodPending: + p.CheckForStuckPod(pod) + } + } + + return pods +} + +func (p *NativeResourcePoller) TestAffinityZone(dep *appsv1.Deployment) bool { + zoneExp := "topology.kubernetes.io/zone" + + affinity := dep.Spec.Template.Spec.Affinity.NodeAffinity + if affinity != nil { + nodes := affinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms + for _, node := range nodes { + for _, expr := range node.MatchExpressions { + depExp := expr.Key + if zoneExp == depExp { + return true + } + } + } + } else { + return false + } + + return false +} + +func (p *NativeResourcePoller) TestAffinityRegion(dep *appsv1.Deployment) bool { + regionExp := "topology.kubernetes.io/region" + + affinity := dep.Spec.Template.Spec.Affinity.NodeAffinity + if affinity != nil { + nodes := affinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms + for _, node := range nodes { + for _, expr := range node.MatchExpressions { + depExp := expr.Key + if regionExp == depExp { + return true + } + } + } + } else { + return false + } + + return false +} diff --git a/integration/nginx-deployment.yaml b/integration/nginx-deployment.yaml new file mode 100644 index 00000000..3762454b --- /dev/null +++ b/integration/nginx-deployment.yaml @@ -0,0 +1,127 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.2.0 + name: ingress-nginx-controller + namespace: ingress-nginx +spec: + minReadySeconds: 0 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + strategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + spec: + containers: + - args: + - 
/nginx-ingress-controller + - --election-id=ingress-controller-leader + - --controller-class=k8s.io/ingress-nginx + - --ingress-class=nginx + - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller + - --validating-webhook=:8443 + - --validating-webhook-certificate=/usr/local/certificates/cert + - --validating-webhook-key=/usr/local/certificates/key + - --watch-ingress-without-class=true + - --publish-status-address=localhost + - --enable-ssl-passthrough + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + image: k8s.gcr.io/ingress-nginx/controller:v1.2.0@sha256:d8196e3bc1e72547c5dec66d6556c0ff92a23f6d0919b206be170bc90d5f9185 + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + name: controller + ports: + - containerPort: 80 + hostPort: 80 + name: http + protocol: TCP + - containerPort: 443 + hostPort: 443 + name: https + protocol: TCP + - containerPort: 8443 + name: webhook + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + resources: + requests: + cpu: 100m + memory: 90Mi + securityContext: + allowPrivilegeEscalation: true + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + runAsUser: 101 + volumeMounts: + - mountPath: /usr/local/certificates/ + name: webhook-cert + readOnly: true + dnsPolicy: ClusterFirst + nodeSelector: + ingress-ready: "true" + kubernetes.io/os: linux + serviceAccountName: ingress-nginx + terminationGracePeriodSeconds: 0 + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Equal + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Equal + volumes: + - name: webhook-cert + secret: + secretName: ingress-nginx-admission diff --git a/integration/operator.go b/integration/operator.go new file mode 100644 index 00000000..58768923 --- /dev/null +++ b/integration/operator.go @@ -0,0 +1,207 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package integration + +import ( + "context" + "fmt" + "os" + "path/filepath" + + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/deployer" + cainit "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca" + ordererinit "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer" + peerinit "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer" + uzap "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +// GetOperatorConfig returns the operator configuration with the default templating files population +// and with default versions set for components. +func GetOperatorConfig(configs, caFiles, peerFiles, ordererFiles, consoleFiles string) *config.Config { + ulevel := uzap.NewAtomicLevelAt(2) + if os.Getenv("LOG_LEVEL") == "debug" { + ulevel = uzap.NewAtomicLevelAt(-1) + } + level := zap.Level(&ulevel) + logger := zap.New(zap.Opts(level)) + + cfg := &config.Config{ + CAInitConfig: &cainit.Config{ + CADefaultConfigPath: filepath.Join(configs, "ca/ca.yaml"), + TLSCADefaultConfigPath: filepath.Join(configs, "ca/tlsca.yaml"), + DeploymentFile: filepath.Join(caFiles, "deployment.yaml"), + PVCFile: filepath.Join(caFiles, "pvc.yaml"), + ServiceFile: filepath.Join(caFiles, "service.yaml"), + RoleFile: filepath.Join(caFiles, "role.yaml"), + ServiceAccountFile: filepath.Join(caFiles, "serviceaccount.yaml"), + RoleBindingFile: filepath.Join(caFiles, "rolebinding.yaml"), + ConfigMapFile: filepath.Join(caFiles, "configmap-caoverride.yaml"), + IngressFile: filepath.Join(caFiles, "ingress.yaml"), + Ingressv1beta1File: filepath.Join(caFiles, "ingressv1beta1.yaml"), + RouteFile: filepath.Join(caFiles, "route.yaml"), + SharedPath: "/tmp/data", + }, + PeerInitConfig: &peerinit.Config{ + CorePeerFile: filepath.Join(configs, "peer/core.yaml"), + CorePeerV2File: filepath.Join(configs, "peer/v2/core.yaml"), + OUFile: filepath.Join(configs, "peer/ouconfig.yaml"), + InterOUFile: filepath.Join(configs, "peer/ouconfig-inter.yaml"), + DeploymentFile: filepath.Join(peerFiles, "deployment.yaml"), + PVCFile: filepath.Join(peerFiles, "pvc.yaml"), + CouchDBPVCFile: filepath.Join(peerFiles, "couchdb-pvc.yaml"), + ServiceFile: filepath.Join(peerFiles, "service.yaml"), + RoleFile: filepath.Join(peerFiles, "role.yaml"), + ServiceAccountFile: filepath.Join(peerFiles, "serviceaccount.yaml"), + RoleBindingFile: filepath.Join(peerFiles, "rolebinding.yaml"), + FluentdConfigMapFile: filepath.Join(peerFiles, "fluentd-configmap.yaml"), + CouchContainerFile: filepath.Join(peerFiles, "couchdb.yaml"), + CouchInitContainerFile: filepath.Join(peerFiles, "couchdb-init.yaml"), + IngressFile: filepath.Join(peerFiles, "ingress.yaml"), + Ingressv1beta1File: filepath.Join(peerFiles, "ingressv1beta1.yaml"), + CCLauncherFile: filepath.Join(peerFiles, "chaincode-launcher.yaml"), + RouteFile: filepath.Join(peerFiles, "route.yaml"), + StoragePath: "/tmp/peerinit", + }, + OrdererInitConfig: &ordererinit.Config{ + OrdererV2File: filepath.Join(configs, "orderer/v2/orderer.yaml"), + OrdererV24File: filepath.Join(configs, "orderer/v24/orderer.yaml"), + OrdererFile: filepath.Join(configs, "orderer/orderer.yaml"), + ConfigTxFile: filepath.Join(configs, "orderer/configtx.yaml"), + OUFile: filepath.Join(configs, "orderer/ouconfig.yaml"), + InterOUFile: filepath.Join(configs, 
"orderer/ouconfig-inter.yaml"), + DeploymentFile: filepath.Join(ordererFiles, "deployment.yaml"), + PVCFile: filepath.Join(ordererFiles, "pvc.yaml"), + ServiceFile: filepath.Join(ordererFiles, "service.yaml"), + CMFile: filepath.Join(ordererFiles, "configmap.yaml"), + RoleFile: filepath.Join(ordererFiles, "role.yaml"), + ServiceAccountFile: filepath.Join(ordererFiles, "serviceaccount.yaml"), + RoleBindingFile: filepath.Join(ordererFiles, "rolebinding.yaml"), + IngressFile: filepath.Join(ordererFiles, "ingress.yaml"), + Ingressv1beta1File: filepath.Join(ordererFiles, "ingressv1beta1.yaml"), + RouteFile: filepath.Join(ordererFiles, "route.yaml"), + StoragePath: "/tmp/ordererinit", + }, + ConsoleInitConfig: &config.ConsoleConfig{ + DeploymentFile: filepath.Join(consoleFiles, "deployment.yaml"), + PVCFile: filepath.Join(consoleFiles, "pvc.yaml"), + ServiceFile: filepath.Join(consoleFiles, "service.yaml"), + CMFile: filepath.Join(consoleFiles, "configmap.yaml"), + ConsoleCMFile: filepath.Join(consoleFiles, "console-configmap.yaml"), + DeployerCMFile: filepath.Join(consoleFiles, "deployer-configmap.yaml"), + RoleFile: filepath.Join(consoleFiles, "role.yaml"), + RoleBindingFile: filepath.Join(consoleFiles, "rolebinding.yaml"), + ServiceAccountFile: filepath.Join(consoleFiles, "serviceaccount.yaml"), + IngressFile: filepath.Join(consoleFiles, "ingress.yaml"), + Ingressv1beta1File: filepath.Join(consoleFiles, "ingressv1beta1.yaml"), + NetworkPolicyIngressFile: filepath.Join(consoleFiles, "networkpolicy-ingress.yaml"), + NetworkPolicyDenyAllFile: filepath.Join(consoleFiles, "networkpolicy-denyall.yaml"), + }, + Logger: &logger, + Operator: config.Operator{ + Restart: config.Restart{ + Timeout: common.MustParseDuration("5m"), + }, + }, + } + + setDefaultVersions(cfg) + return cfg +} + +func setDefaultVersions(operatorCfg *config.Config) { + operatorCfg.Operator.Versions = &deployer.Versions{ + CA: map[string]deployer.VersionCA{ + FabricCAVersion + "-1": { + Default: true, + Version: FabricCAVersion + "-1", + Image: deployer.CAImages{ + CAInitImage: InitImage, + CAInitTag: InitTag, + CAImage: CaImage, + CATag: CaTag, + }, + }, + }, + Peer: map[string]deployer.VersionPeer{ + FabricVersion + "-1": { + Default: true, + Version: FabricVersion + "-1", + Image: deployer.PeerImages{ + PeerInitImage: InitImage, + PeerInitTag: InitTag, + PeerImage: PeerImage, + PeerTag: PeerTag, + CouchDBImage: CouchdbImage, + CouchDBTag: CouchdbTag, + GRPCWebImage: GrpcwebImage, + GRPCWebTag: GrpcwebTag, + }, + }, + }, + Orderer: map[string]deployer.VersionOrderer{ + FabricVersion + "-1": { + Default: true, + Version: FabricVersion + "-1", + Image: deployer.OrdererImages{ + OrdererInitImage: InitImage, + OrdererInitTag: InitTag, + OrdererImage: OrdererImage, + OrdererTag: OrdererTag, + GRPCWebImage: GrpcwebImage, + GRPCWebTag: GrpcwebTag, + }, + }, + }, + } +} + +type Operator struct { + NativeResourcePoller +} + +func (o *Operator) GetPod() (*corev1.Pod, error) { + opts := metav1.ListOptions{ + LabelSelector: fmt.Sprintf("name=%s", o.Name), + } + podList, err := o.Client.CoreV1().Pods(o.Namespace).List(context.TODO(), opts) + if err != nil { + return nil, err + } + return &podList.Items[0], nil +} + +func (o *Operator) Restart() error { + pod, err := o.GetPod() + if err != nil { + return err + } + + err = o.Client.CoreV1().Pods(o.Namespace).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}) + if err != nil { + return err + } + + return nil +} diff --git a/integration/operatorrestart/operatorrestart_suite_test.go 
b/integration/operatorrestart/operatorrestart_suite_test.go new file mode 100644 index 00000000..874ab0e9 --- /dev/null +++ b/integration/operatorrestart/operatorrestart_suite_test.go @@ -0,0 +1,370 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package operatorrestart_test + +import ( + "encoding/base64" + "fmt" + "io/ioutil" + "net/url" + "os" + "path/filepath" + "strings" + "testing" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/onsi/gomega/gexec" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/integration" + "github.com/IBM-Blockchain/fabric-operator/integration/helper" + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/ca/v1" + ibpclient "github.com/IBM-Blockchain/fabric-operator/pkg/client" + "github.com/IBM-Blockchain/fabric-operator/pkg/command" + baseorderer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + "github.com/IBM-Blockchain/fabric-operator/pkg/util/pointer" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes" +) + +func TestOperatorrestart(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Operatorrestart Suite") +} + +const ( + ccTarFile = "gocc.tar.gz" + + FabricBinaryVersion = "2.2.3" + FabricCABinaryVersion = "1.5.1" + + peerAdminUsername = "peer-admin" + peerUsername = "peer" + ordererUsername = "orderer" + + IBPCAS = "ibpcas" + IBPPEERS = "ibppeers" + IBPORDERERS = "ibporderers" +) + +var ( + wd string // Working directory of test + namespace string + domain string + kclient *kubernetes.Clientset + ibpCRClient *ibpclient.IBPClient + colorIndex uint + testFailed bool + caHost string + tlsBytes []byte + + org1ca *helper.CA + org1peer *helper.Peer + orderer *helper.Orderer +) + +var _ = BeforeSuite(func() { + SetDefaultEventuallyTimeout(420 * time.Second) + SetDefaultEventuallyPollingInterval(time.Second) + + var err error + + domain = os.Getenv("DOMAIN") + if domain == "" { + domain = integration.TestAutomation1IngressDomain + } + + wd, err = os.Getwd() + Expect(err).NotTo(HaveOccurred()) + fmt.Fprintf(GinkgoWriter, "Working directory: %s\n", wd) + + cleanupFiles() + + cfg := &integration.Config{ + OperatorServiceAccount: "../../config/rbac/service_account.yaml", + OperatorRole: "../../config/rbac/role.yaml", + OperatorRoleBinding: "../../config/rbac/role_binding.yaml", + OperatorDeployment: "../../testdata/deploy/operator.yaml", + OrdererSecret: "../../testdata/deploy/orderer/secret.yaml", + PeerSecret: "../../testdata/deploy/peer/secret.yaml", + ConsoleTLSSecret: "../../testdata/deploy/console/tlssecret.yaml", + } + + namespace, kclient, ibpCRClient, err = integration.Setup(GinkgoWriter, cfg, "operatorrestart", "") + + Expect(err).NotTo(HaveOccurred()) + + downloadBinaries() 
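+
+	// CreateNetwork (defined below) stands up the test fabric before the restart
+	// scenarios run: it starts a CA, enrolls the CA admin, registers the peer and
+	// orderer identities, and then creates the peer and orderer CRs.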
+ + CreateNetwork() +}) + +var _ = AfterSuite(func() { + + if strings.ToLower(os.Getenv("SAVE_TEST")) == "true" { + return + } + + integration.Cleanup(GinkgoWriter, kclient, namespace) + + cleanupFiles() +}) + +func CreateNetwork() { + By("starting CA pod", func() { + org1ca = Org1CA() + helper.CreateCA(ibpCRClient, org1ca.CR) + + Eventually(org1ca.PodIsRunning).Should((Equal(true))) + }) + + profile, err := org1ca.ConnectionProfile() + Expect(err).NotTo(HaveOccurred()) + + tlsBytes, err = util.Base64ToBytes(profile.TLS.Cert) + Expect(err).NotTo(HaveOccurred()) + + By("performing CA health check", func() { + Eventually(func() bool { + url := fmt.Sprintf("https://%s/cainfo", org1ca.Address()) + fmt.Fprintf(GinkgoWriter, "Waiting for CA health check to pass for '%s' at url: %s\n", org1ca.Name, url) + return org1ca.HealthCheck(url, tlsBytes) + }).Should(Equal(true)) + }) + + org1ca.TLSToFile(tlsBytes) + + caURL, err := url.Parse(profile.Endpoints.API) + Expect(err).NotTo(HaveOccurred()) + caHost = strings.Split(caURL.Host, ":")[0] + + By("enrolling ca admin", func() { + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, org1ca.Name, "org1ca-admin")) + sess, err := helper.StartSession(org1ca.Enroll("admin", "adminpw"), "Enroll CA Admin") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + }) + + By("registering peer identity", func() { + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, org1ca.Name, "org1ca-admin")) + sess, err := helper.StartSession(org1ca.Register(peerUsername, "peerpw", "peer"), "Register User") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + }) + + By("registering and enrolling peer admin", func() { + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, org1ca.Name, "org1ca-admin")) + sess, err := helper.StartSession(org1ca.Register(peerAdminUsername, "peer-adminpw", "admin"), "Register Peer Admin") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, "org1peer", peerAdminUsername)) + sess, err = helper.StartSession(org1ca.Enroll(peerAdminUsername, "peer-adminpw"), "Enroll Peer Admin") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + }) + + By("registering orderer identity", func() { + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, org1ca.Name, "org1ca-admin")) + sess, err := helper.StartSession(org1ca.Register(ordererUsername, "ordererpw", "orderer"), "Register Orderer Identity") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, org1ca.Name, "org1ca-admin")) + sess, err = helper.StartSession(org1ca.Register("orderer2", "ordererpw2", "orderer"), "Register Orderer Identity") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + }) + + adminCertBytes, err := ioutil.ReadFile( + filepath.Join( + wd, + "org1peer", + peerAdminUsername, + "msp", + "signcerts", + "cert.pem", + ), + ) + Expect(err).NotTo(HaveOccurred()) + adminCertB64 := base64.StdEncoding.EncodeToString(adminCertBytes) + tlsCert := base64.StdEncoding.EncodeToString(tlsBytes) + + By("starting Peer pod", func() { + org1peer = Org1Peer(tlsCert, caHost, adminCertB64) + err = helper.CreatePeer(ibpCRClient, org1peer.CR) + Expect(err).NotTo(HaveOccurred()) + }) + + By("starting Orderer pod", func() { + orderer = GetOrderer(tlsCert, caHost) + err = helper.CreateOrderer(ibpCRClient, orderer.CR) + Expect(err).NotTo(HaveOccurred()) + }) + + 
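+	// Wait for the peer and the first orderer node to report Running and Ready.
+	// PodIsRunning is provided by the embedded NativeResourcePoller, which also
+	// recycles pods that get stuck in Pending with CreateContainerConfigError.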
Eventually(org1peer.PodIsRunning).Should((Equal(true))) + Eventually(orderer.Nodes[0].PodIsRunning).Should((Equal(true))) +} + +func downloadBinaries() { + os.Setenv("FABRIC_VERSION", FabricBinaryVersion) + os.Setenv("FABRIC_CA_VERSION", FabricCABinaryVersion) + sess, err := helper.StartSession( + helper.GetCommand(helper.AbsPath(wd, "../../scripts/download_binaries.sh")), + "Download Binaries", + ) + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) +} + +func cleanupFiles() { + os.RemoveAll(filepath.Join(wd, Org1CA().Name)) +} + +func RestartOperator() { + fmt.Fprintf(GinkgoWriter, "Restarting operator\n") + integration.ShutdownOperator(GinkgoWriter) + + fmt.Fprintf(GinkgoWriter, "Operator stopped\n") + + // Currently triggering restart by closing channel results in following error on operator restart: + // {"level":"error","ts":1600966252.5380569,"logger":"controller-runtime.metrics","msg":"failed to register metric","name":"workqueue_retries_total","queue":"ibpconsole-controller","error":"duplicate metrics collector registration attempted" + // + // This error is not a breaking error, it can be ignored for testing purposes + + fmt.Fprintf(GinkgoWriter, "Starting operator\n") + err := command.OperatorWithSignal(integration.OperatorCfg(), integration.SetupSignalHandler(), false, true) + Expect(err).NotTo(HaveOccurred()) +} + +func Org1CA() *helper.CA { + caOverrides := &v1.ServerConfig{ + Debug: pointer.True(), + CAConfig: v1.CAConfig{ + Affiliations: map[string]interface{}{ + "org1": []string{"department1"}, + }, + DB: &v1.CAConfigDB{ + Type: "sqlite3", + }, + }, + } + caJson, err := util.ConvertToJsonMessage(caOverrides) + Expect(err).NotTo(HaveOccurred()) + + name := "ibpca1" + cr := &current.IBPCA{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: current.IBPCASpec{ + License: current.License{ + Accept: true, + }, + ImagePullSecrets: []string{"regcred"}, + Domain: domain, + Images: &current.CAImages{ + CAImage: integration.CaImage, + CATag: integration.CaTag, + CAInitImage: integration.InitImage, + CAInitTag: integration.InitTag, + }, + ConfigOverride: &current.ConfigOverride{ + CA: &runtime.RawExtension{Raw: *caJson}, + TLSCA: &runtime.RawExtension{Raw: *caJson}, + }, + FabricVersion: integration.FabricCAVersion, + }, + } + + return &helper.CA{ + Domain: domain, + Name: cr.Name, + Namespace: namespace, + WorkingDir: wd, + CR: cr, + CRClient: ibpCRClient, + KClient: kclient, + NativeResourcePoller: integration.NativeResourcePoller{ + Name: cr.Name, + Namespace: namespace, + Client: kclient, + }, + } +} + +func Org1Peer(tlsCert, caHost, adminCert string) *helper.Peer { + cr, err := helper.Org1PeerCR(namespace, domain, peerUsername, tlsCert, caHost, adminCert) + Expect(err).NotTo(HaveOccurred()) + + return &helper.Peer{ + Domain: domain, + Name: cr.Name, + Namespace: namespace, + WorkingDir: wd, + CR: cr, + CRClient: ibpCRClient, + KClient: kclient, + NativeResourcePoller: integration.NativeResourcePoller{ + Name: cr.Name, + Namespace: namespace, + Client: kclient, + }, + } +} + +func GetOrderer(tlsCert, caHost string) *helper.Orderer { + cr, err := helper.OrdererCR(namespace, domain, ordererUsername, tlsCert, caHost) + Expect(err).NotTo(HaveOccurred()) + + nodes := []helper.Orderer{ + helper.Orderer{ + Name: cr.Name + "node1", + Namespace: namespace, + CR: cr.DeepCopy(), + NodeName: fmt.Sprintf("%s%s%d", cr.Name, baseorderer.NODE, 1), + NativeResourcePoller: integration.NativeResourcePoller{ + Name: cr.Name + "node1", + Namespace: namespace, + 
Client: kclient, + }, + }, + } + + nodes[0].CR.ObjectMeta.Name = cr.Name + "node1" + + return &helper.Orderer{ + Name: cr.Name, + Namespace: namespace, + CR: cr, + NodeName: fmt.Sprintf("%s-%s%d", cr.Name, baseorderer.NODE, 1), + NativeResourcePoller: integration.NativeResourcePoller{ + Name: cr.Name, + Namespace: namespace, + Client: kclient, + }, + Nodes: nodes, + } +} diff --git a/integration/operatorrestart/operatorrestart_test.go b/integration/operatorrestart/operatorrestart_test.go new file mode 100644 index 00000000..436f2d68 --- /dev/null +++ b/integration/operatorrestart/operatorrestart_test.go @@ -0,0 +1,114 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package operatorrestart_test + +import ( + "fmt" + "time" + + "github.com/IBM-Blockchain/fabric-operator/integration/helper" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("operator restart", func() { + Context("CA", func() { + var ( + originalPodName string + ) + + BeforeEach(func() { + Eventually(func() int { + return len(org1ca.GetRunningPods()) + }).Should(Equal(1)) + originalPodName = org1ca.GetRunningPods()[0].Name + }) + + It("does not restart ca on operator restart", func() { + RestartOperator() + + Consistently(func() string { + fmt.Fprintf(GinkgoWriter, "Making sure '%s' does not restart, original pod name '%s'\n", org1ca.Name, originalPodName) + + if len(org1ca.GetRunningPods()) != 1 { + return "incorrect number of running pods" + } + + return org1ca.GetRunningPods()[0].Name + }, 5*time.Second, time.Second).Should(Equal(originalPodName)) + }) + }) + + Context("Peer", func() { + var ( + originalPodName string + ) + + BeforeEach(func() { + Eventually(func() int { + return len(org1peer.GetRunningPods()) + }).Should(Equal(1)) + originalPodName = org1peer.GetRunningPods()[0].Name + }) + + It("does not restart peer on operator restart", func() { + RestartOperator() + + Consistently(func() string { + fmt.Fprintf(GinkgoWriter, "Making sure '%s' does not restart, original pod name '%s'\n", org1peer.Name, originalPodName) + + if len(org1peer.GetRunningPods()) != 1 { + return "incorrect number of running pods" + } + + return org1peer.GetRunningPods()[0].Name + }, 5*time.Second, time.Second).Should(Equal(originalPodName)) + }) + }) + + Context("Orderer Node", func() { + var ( + node helper.Orderer + originalPodName string + ) + + BeforeEach(func() { + node = orderer.Nodes[0] + + Eventually(func() int { + return len(node.GetRunningPods()) + }).Should(Equal(1)) + originalPodName = node.GetRunningPods()[0].Name + }) + + It("does not restart orderer node on operator restart", func() { + RestartOperator() + + Consistently(func() string { + fmt.Fprintf(GinkgoWriter, "Making sure '%s' does not restart, original pod name '%s'\n", node.Name, originalPodName) + + if len(org1peer.GetRunningPods()) != 1 { + return "incorrect number of running pods" + } + + return 
node.GetRunningPods()[0].Name + }, 5*time.Second, time.Second).Should(Equal(originalPodName)) + }) + }) +}) diff --git a/integration/orderer/orderer_suite_test.go b/integration/orderer/orderer_suite_test.go new file mode 100644 index 00000000..9fef120f --- /dev/null +++ b/integration/orderer/orderer_suite_test.go @@ -0,0 +1,105 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package orderer_test + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/IBM-Blockchain/fabric-operator/integration" + "github.com/IBM-Blockchain/fabric-operator/integration/helper" + ibpclient "github.com/IBM-Blockchain/fabric-operator/pkg/client" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/onsi/gomega/gexec" + "k8s.io/client-go/kubernetes" +) + +func TestOrderer(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Orderer Suite") +} + +const ( + FabricBinaryVersion = "2.2.3" + FabricCABinaryVersion = "1.5.1" + ordererUsername = "orderer" + ordererPassword = "orderer" +) + +var ( + namespaceSuffix = "orderer" + + namespace string + kclient *kubernetes.Clientset + ibpCRClient *ibpclient.IBPClient + testFailed bool + wd string +) + +var _ = BeforeSuite(func() { + SetDefaultEventuallyTimeout(300 * time.Second) + SetDefaultEventuallyPollingInterval(time.Second) + + var err error + + wd, err = os.Getwd() + Expect(err).NotTo(HaveOccurred()) + fmt.Fprintf(GinkgoWriter, "Working directory: %s\n", wd) + + cfg := &integration.Config{ + OperatorServiceAccount: "../../config/rbac/service_account.yaml", + OperatorRole: "../../config/rbac/role.yaml", + OperatorRoleBinding: "../../config/rbac/role_binding.yaml", + OperatorDeployment: "../../testdata/deploy/operator.yaml", + OrdererSecret: "../../testdata/deploy/orderer/secret.yaml", + PeerSecret: "../../testdata/deploy/peer/secret.yaml", + ConsoleTLSSecret: "../../testdata/deploy/console/tlssecret.yaml", + } + + namespace, kclient, ibpCRClient, err = integration.Setup(GinkgoWriter, cfg, namespaceSuffix, "") + Expect(err).NotTo(HaveOccurred()) + +}) + +var _ = AfterSuite(func() { + + if strings.ToLower(os.Getenv("SAVE_TEST")) == "true" { + return + } + + if strings.ToLower(os.Getenv("SAVE_TEST")) == "true" { + return + } + + err := integration.Cleanup(GinkgoWriter, kclient, namespace) + Expect(err).NotTo(HaveOccurred()) +}) + +func downloadBinaries() { + os.Setenv("FABRIC_VERSION", FabricBinaryVersion) + os.Setenv("FABRIC_CA_VERSION", FabricCABinaryVersion) + sess, err := helper.StartSession(helper.GetCommand(filepath.Join(wd, "../../scripts/download_binaries.sh")), "Download Binaries") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) +} diff --git a/integration/orderer/orderer_test.go b/integration/orderer/orderer_test.go new file mode 100644 index 00000000..b4a89f87 --- /dev/null +++ b/integration/orderer/orderer_test.go @@ -0,0 +1,1404 @@ 
+//go:build !pkcs11 +// +build !pkcs11 + +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package orderer_test + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "sigs.k8s.io/yaml" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/integration" + "github.com/IBM-Blockchain/fabric-operator/integration/helper" + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/orderer/v1" + v2 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/orderer/v2" + config "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer/config/v2" + baseorderer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" +) + +type OrdererConfig interface { + ToBytes() ([]byte, error) +} + +var ( + orderer *Orderer + orderer2 *Orderer + orderer3 *Orderer + orderer4 *Orderer + orderer5 *Orderer + orderer1nodes []Orderer + orderer2nodes []Orderer + orderer3nodes []Orderer + orderer4nodes []Orderer + orderer5nodes []Orderer +) + +var ( + defaultRequestsOrderer = corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("20m"), + corev1.ResourceMemory: resource.MustParse("40M"), + corev1.ResourceEphemeralStorage: resource.MustParse("100M"), + } + + defaultLimitsOrderer = corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("200m"), + corev1.ResourceMemory: resource.MustParse("400M"), + corev1.ResourceEphemeralStorage: resource.MustParse("1G"), + } + + defaultRequestsProxy = corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("10m"), + corev1.ResourceMemory: resource.MustParse("20M"), + corev1.ResourceEphemeralStorage: resource.MustParse("100M"), + } + + defaultLimitsProxy = corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("200M"), + corev1.ResourceEphemeralStorage: resource.MustParse("1G"), + } + + testMSPSpec = ¤t.MSPSpec{ + Component: ¤t.MSP{ + KeyStore: "LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JR0hBZ0VBTUJNR0J5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEJHMHdhd0lCQVFRZ2FYb2MwNkxoWmliYjFsSEUKU0ZaY2NSeThmcWUySjROQW1rdEtXZEpFZVBxaFJBTkNBQVJ4UGVOKy94WHRLeTdXNGlZajUxQ29LQ2NmZ2Y4NApnMDBkamEzSStNeHNLSDZncVNQUGpXbThvUi9sYnZhbW9jay84bURoRi9yZTd3SU5qWkpGeG80aAotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg==", + SignCerts: 
"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNUVENDQWZPZ0F3SUJBZ0lVTUw4NVhXVVJLZURqV1ZjelNWZ0ZoWDdtWlFjd0NnWUlLb1pJemowRUF3SXcKYURFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJrd0Z3WURWUVFERXhCbVlXSnlhV010ClkyRXRjMlZ5ZG1WeU1CNFhEVEl3TVRFek1ESXdNVGN3TUZvWERUSTFNVEV5T1RJd01qSXdNRm93WFRFTE1Ba0cKQTFVRUJoTUNWVk14RnpBVkJnTlZCQWdURGs1dmNuUm9JRU5oY205c2FXNWhNUlF3RWdZRFZRUUtFd3RJZVhCbApjbXhsWkdkbGNqRVBNQTBHQTFVRUN4TUdZMnhwWlc1ME1RNHdEQVlEVlFRREV3VmhaRzFwYmpCWk1CTUdCeXFHClNNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJIRTk0MzcvRmUwckx0YmlKaVBuVUtnb0p4K0IvemlEVFIyTnJjajQKekd3b2ZxQ3BJOCtOYWJ5aEgrVnU5cWFoeVQveVlPRVgrdDd2QWcyTmtrWEdqaUdqZ1lVd2dZSXdEZ1lEVlIwUApBUUgvQkFRREFnZUFNQXdHQTFVZEV3RUIvd1FDTUFBd0hRWURWUjBPQkJZRUZNSGxPTGthZTFSbFRaZ1BNQ0ZQCkxKai80MHBzTUI4R0ExVWRJd1FZTUJhQUZNeTZicUR5Q1p1UThEeTBQWkhtVUNJTDRzNmlNQ0lHQTFVZEVRUWIKTUJtQ0YxTmhZV1J6TFUxaFkwSnZiMnN0VUhKdkxteHZZMkZzTUFvR0NDcUdTTTQ5QkFNQ0EwZ0FNRVVDSVFERAowY1Z6aEJFcGo1aFhYVXQzQSsxQVZOc2IyZDgxNVpZSVVVTG0xQXZ5T1FJZ1d1eldoVzQ5QUNWSG8zWkhNRE1vCmU5d3FRbUpTNDB2UGJtMEtOVUVkdURjPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==", + CACerts: []string{"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNGakNDQWIyZ0F3SUJBZ0lVS2dNc2pwYlFSNlRHUUs3QVBhMEZmUVZxT1pvd0NnWUlLb1pJemowRUF3SXcKYURFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJrd0Z3WURWUVFERXhCbVlXSnlhV010ClkyRXRjMlZ5ZG1WeU1CNFhEVEl3TVRFek1ESXdNVFV3TUZvWERUTTFNVEV5TnpJd01UVXdNRm93YURFTE1Ba0cKQTFVRUJoTUNWVk14RnpBVkJnTlZCQWdURGs1dmNuUm9JRU5oY205c2FXNWhNUlF3RWdZRFZRUUtFd3RJZVhCbApjbXhsWkdkbGNqRVBNQTBHQTFVRUN4TUdSbUZpY21sak1Sa3dGd1lEVlFRREV4Qm1ZV0p5YVdNdFkyRXRjMlZ5CmRtVnlNRmt3RXdZSEtvWkl6ajBDQVFZSUtvWkl6ajBEQVFjRFFnQUUrb2lXeWdGNWpLY081cWtzaG8zN3lzRSsKdXYxMEF5WWZrUGxVWXlBVkJOeGtlSGN1RUlWSmY5LzZRL2x2S2NvUyt6cFp2dlFiSTEzT1pSTDNMK25IZXFORgpNRU13RGdZRFZSMFBBUUgvQkFRREFnRUdNQklHQTFVZEV3RUIvd1FJTUFZQkFmOENBUUV3SFFZRFZSME9CQllFCkZNeTZicUR5Q1p1UThEeTBQWkhtVUNJTDRzNmlNQW9HQ0NxR1NNNDlCQU1DQTBjQU1FUUNJQmdSTXNqN3Azc1YKMHNieEQxa2t0amloVEpHVFJBWlZRQXVyY0hhRVVENFVBaUFoN0o4U2ZPQTc5VjN4RDdvaExFcmVpZHVnZnhIbAozWWxZS0g3MG9qQXhRZz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K"}, + AdminCerts: []string{"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNUVENDQWZPZ0F3SUJBZ0lVTUw4NVhXVVJLZURqV1ZjelNWZ0ZoWDdtWlFjd0NnWUlLb1pJemowRUF3SXcKYURFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJrd0Z3WURWUVFERXhCbVlXSnlhV010ClkyRXRjMlZ5ZG1WeU1CNFhEVEl3TVRFek1ESXdNVGN3TUZvWERUSTFNVEV5T1RJd01qSXdNRm93WFRFTE1Ba0cKQTFVRUJoTUNWVk14RnpBVkJnTlZCQWdURGs1dmNuUm9JRU5oY205c2FXNWhNUlF3RWdZRFZRUUtFd3RJZVhCbApjbXhsWkdkbGNqRVBNQTBHQTFVRUN4TUdZMnhwWlc1ME1RNHdEQVlEVlFRREV3VmhaRzFwYmpCWk1CTUdCeXFHClNNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJIRTk0MzcvRmUwckx0YmlKaVBuVUtnb0p4K0IvemlEVFIyTnJjajQKekd3b2ZxQ3BJOCtOYWJ5aEgrVnU5cWFoeVQveVlPRVgrdDd2QWcyTmtrWEdqaUdqZ1lVd2dZSXdEZ1lEVlIwUApBUUgvQkFRREFnZUFNQXdHQTFVZEV3RUIvd1FDTUFBd0hRWURWUjBPQkJZRUZNSGxPTGthZTFSbFRaZ1BNQ0ZQCkxKai80MHBzTUI4R0ExVWRJd1FZTUJhQUZNeTZicUR5Q1p1UThEeTBQWkhtVUNJTDRzNmlNQ0lHQTFVZEVRUWIKTUJtQ0YxTmhZV1J6TFUxaFkwSnZiMnN0VUhKdkxteHZZMkZzTUFvR0NDcUdTTTQ5QkFNQ0EwZ0FNRVVDSVFERAowY1Z6aEJFcGo1aFhYVXQzQSsxQVZOc2IyZDgxNVpZSVVVTG0xQXZ5T1FJZ1d1eldoVzQ5QUNWSG8zWkhNRE1vCmU5d3FRbUpTNDB2UGJtMEtOVUVkdURjPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg=="}, + }, + TLS: ¤t.MSP{ + KeyStore: 
"LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JR0hBZ0VBTUJNR0J5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEJHMHdhd0lCQVFRZzZuNit4cDJod1hrTzBrWHUKbUFiY2Z3aGNUcllDOEQ4SDJFNUZPUmNpMFBTaFJBTkNBQVFCMDBTNDhwbGlmd2tIN1RucGtZUTQrd1hJQ1piSwpnL1Z0U3ZoVUQyOC93dkd4VXdBZXBwSVZCRElCUUZBaE9xZ1F5SkpBQTZWbTVyd2RKaG1aR3M5SQotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg==", + SignCerts: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNiRENDQWhLZ0F3SUJBZ0lVT3RnTGwwR0orSjU2T1llcXI3UFI1ckhKakhNd0NnWUlLb1pJemowRUF3SXcKYURFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJrd0Z3WURWUVFERXhCbVlXSnlhV010ClkyRXRjMlZ5ZG1WeU1CNFhEVEl3TVRFek1ESXdNVGt3TUZvWERUSTFNVEV5T1RJd01qUXdNRm93WFRFTE1Ba0cKQTFVRUJoTUNWVk14RnpBVkJnTlZCQWdURGs1dmNuUm9JRU5oY205c2FXNWhNUlF3RWdZRFZRUUtFd3RJZVhCbApjbXhsWkdkbGNqRVBNQTBHQTFVRUN4TUdZMnhwWlc1ME1RNHdEQVlEVlFRREV3VmhaRzFwYmpCWk1CTUdCeXFHClNNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJBSFRSTGp5bVdKL0NRZnRPZW1SaERqN0JjZ0psc3FEOVcxSytGUVAKYnovQzhiRlRBQjZta2hVRU1nRkFVQ0U2cUJESWtrQURwV2JtdkIwbUdaa2F6MGlqZ2FRd2dhRXdEZ1lEVlIwUApBUUgvQkFRREFnT29NQjBHQTFVZEpRUVdNQlFHQ0NzR0FRVUZCd01CQmdnckJnRUZCUWNEQWpBTUJnTlZIUk1CCkFmOEVBakFBTUIwR0ExVWREZ1FXQkJTOTY4MUFxUEZ1dndHNUZsVFROS0J2Z2FKdk56QWZCZ05WSFNNRUdEQVcKZ0JUTXVtNmc4Z21ia1BBOHREMlI1bEFpQytMT29qQWlCZ05WSFJFRUd6QVpnaGRUWVdGa2N5MU5ZV05DYjI5cgpMVkJ5Ynk1c2IyTmhiREFLQmdncWhrak9QUVFEQWdOSUFEQkZBaUVBK0RzckZlUkxEQXJ1eVNxVWJmc2hVWkFCCmhMNXpqZ2k2ckpFZzFtQW1iSFVDSUUwSjFQOUlxVFZHMU54UjdEQ1lBdVZkbmJ4eWJHWkUyMDA5eDl3Y0pudksKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=", + CACerts: []string{"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNGakNDQWIyZ0F3SUJBZ0lVS2dNc2pwYlFSNlRHUUs3QVBhMEZmUVZxT1pvd0NnWUlLb1pJemowRUF3SXcKYURFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJrd0Z3WURWUVFERXhCbVlXSnlhV010ClkyRXRjMlZ5ZG1WeU1CNFhEVEl3TVRFek1ESXdNVFV3TUZvWERUTTFNVEV5TnpJd01UVXdNRm93YURFTE1Ba0cKQTFVRUJoTUNWVk14RnpBVkJnTlZCQWdURGs1dmNuUm9JRU5oY205c2FXNWhNUlF3RWdZRFZRUUtFd3RJZVhCbApjbXhsWkdkbGNqRVBNQTBHQTFVRUN4TUdSbUZpY21sak1Sa3dGd1lEVlFRREV4Qm1ZV0p5YVdNdFkyRXRjMlZ5CmRtVnlNRmt3RXdZSEtvWkl6ajBDQVFZSUtvWkl6ajBEQVFjRFFnQUUrb2lXeWdGNWpLY081cWtzaG8zN3lzRSsKdXYxMEF5WWZrUGxVWXlBVkJOeGtlSGN1RUlWSmY5LzZRL2x2S2NvUyt6cFp2dlFiSTEzT1pSTDNMK25IZXFORgpNRU13RGdZRFZSMFBBUUgvQkFRREFnRUdNQklHQTFVZEV3RUIvd1FJTUFZQkFmOENBUUV3SFFZRFZSME9CQllFCkZNeTZicUR5Q1p1UThEeTBQWkhtVUNJTDRzNmlNQW9HQ0NxR1NNNDlCQU1DQTBjQU1FUUNJQmdSTXNqN3Azc1YKMHNieEQxa2t0amloVEpHVFJBWlZRQXVyY0hhRVVENFVBaUFoN0o4U2ZPQTc5VjN4RDdvaExFcmVpZHVnZnhIbAozWWxZS0g3MG9qQXhRZz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K"}, + }, + } +) + +var _ = Describe("Interaction between IBP-Operator and Kubernetes cluster", func() { + SetDefaultEventuallyTimeout(420 * time.Second) + SetDefaultEventuallyPollingInterval(time.Second) + + BeforeEach(func() { + orderer, orderer1nodes = GetOrderer() + err := helper.CreateOrderer(ibpCRClient, orderer.CR) + Expect(err).NotTo(HaveOccurred()) + + integration.ClearOperatorConfig(kclient, namespace) + }) + + AfterEach(func() { + // Set flag if a test falls + if CurrentGinkgoTestDescription().Failed { + testFailed = true + } + }) + + Context("IBPOrderer controller", func() { + + Context("applying first instance of IBPOrderer CR", func() { + var ( + err error + dep *appsv1.Deployment + ) + + It("creates a IBPOrderer custom resource", func() { + By("setting the CR status to precreate", func() { + for _, node := range orderer1nodes { + Eventually(node.pollForCRStatus).Should((Equal(current.Precreated))) + } + // TODO 
flake + // Eventually(orderer.pollForCRStatus).Should((Equal(current.Deploying))) + }) + + By("creating a pvc", func() { + for _, node := range orderer1nodes { + Eventually(node.PVCExists).Should((Equal(true))) + } + }) + + By("creating a service", func() { + for _, node := range orderer1nodes { + Eventually(node.ServiceExists).Should((Equal(true))) + } + }) + + By("creating a configmap", func() { + for _, node := range orderer1nodes { + Eventually(node.ConfigMapExists).Should((Equal(true))) + } + }) + + By("starting a ingress", func() { + for _, node := range orderer1nodes { + Eventually(node.IngressExists).Should((Equal(true))) + } + }) + + By("creating a deployment", func() { + for _, node := range orderer1nodes { + Eventually(node.DeploymentExists).Should((Equal(true))) + } + }) + + By("creating init secrets", func() { + for _, node := range orderer1nodes { + Eventually(node.allInitSecretsExist).Should((Equal(true))) + } + }) + + By("starting a pod", func() { + for _, node := range orderer1nodes { + Eventually(node.PodIsRunning).Should((Equal(true))) + } + }) + + By("creating config map that contains spec", func() { + for _, node := range orderer1nodes { + Eventually(func() bool { + _, err := kclient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), node.Name+"-spec", metav1.GetOptions{}) + if err != nil { + return false + } + return true + }).Should(Equal(true)) + } + }) + + By("setting the CR status to deployed when pod is running", func() { + for _, node := range orderer1nodes { + Eventually(node.pollForCRStatus).Should((Equal(current.Deployed))) + } + Eventually(orderer.pollForCRStatus).Should((Equal(current.Deployed))) + }) + + By("overriding general section in orderer.yaml", func() { + cm, err := kclient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), orderer.Name+"node1-config", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + ordererBytes := cm.BinaryData["orderer.yaml"] + ordererConfig, err := config.ReadOrdererFromBytes(ordererBytes) + Expect(err).NotTo(HaveOccurred()) + configOverride, err := orderer.CR.GetConfigOverride() + Expect(err).NotTo(HaveOccurred()) + bytes, err := configOverride.(OrdererConfig).ToBytes() + Expect(err).NotTo(HaveOccurred()) + oConfig := &config.Orderer{} + err = yaml.Unmarshal(bytes, oConfig) + Expect(err).NotTo(HaveOccurred()) + Expect(ordererConfig.General.ListenPort).To(Equal(oConfig.General.ListenPort)) + }) + }) + + It("should not find zone and region", func() { + // Wait for new deployment before querying deployment for updates + err = wait.Poll(500*time.Millisecond, 60*time.Second, func() (bool, error) { + ready := true + for _, node := range orderer1nodes { + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), node.NodeName, metav1.GetOptions{}) + if dep != nil { + if dep.Status.UpdatedReplicas != 1 || dep.Status.Conditions[0].Type != appsv1.DeploymentAvailable { + ready = false + } + } + } + + return ready, nil + }) + Expect(err).NotTo(HaveOccurred()) + for _, node := range orderer1nodes { + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), node.NodeName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("checking zone", func() { + Expect(node.TestAffinityZone(dep)).To((Equal(false))) + }) + + By("checking region", func() { + Expect(node.TestAffinityRegion(dep)).To((Equal(false))) + }) + } + }) + + When("the custom resource is updated", func() { + var ( + dep *appsv1.Deployment + newResourceRequestsOrderer corev1.ResourceList + newResourceLimitsOrderer corev1.ResourceList 
+ newResourceRequestsProxy corev1.ResourceList + newResourceLimitsProxy corev1.ResourceList + ) + + BeforeEach(func() { + newResourceRequestsOrderer = map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("240m"), + corev1.ResourceMemory: resource.MustParse("480M"), + corev1.ResourceEphemeralStorage: resource.MustParse("100M"), + } + newResourceLimitsOrderer = map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("240m"), + corev1.ResourceMemory: resource.MustParse("480M"), + corev1.ResourceEphemeralStorage: resource.MustParse("1G"), + } + + newResourceRequestsProxy = map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("90m"), + corev1.ResourceMemory: resource.MustParse("180M"), + corev1.ResourceEphemeralStorage: resource.MustParse("100M"), + } + newResourceLimitsProxy = map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("90m"), + corev1.ResourceMemory: resource.MustParse("180M"), + corev1.ResourceEphemeralStorage: resource.MustParse("1G"), + } + + for _, node := range orderer1nodes { + Eventually(node.DeploymentExists).Should((Equal(true))) + } + }) + + It("updates the instance of IBPOrderer if resources are updated in CR", func() { + for _, node := range orderer1nodes { + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), node.NodeName, metav1.GetOptions{}) + + ordererResources := dep.Spec.Template.Spec.Containers[0].Resources + Expect(ordererResources.Requests).To(Equal(defaultRequestsOrderer)) + Expect(ordererResources.Limits).To(Equal(defaultLimitsOrderer)) + + proxyResources := dep.Spec.Template.Spec.Containers[1].Resources + Expect(proxyResources.Requests).To(Equal(defaultRequestsProxy)) + Expect(proxyResources.Limits).To(Equal(defaultLimitsProxy)) + + updatenode := ¤t.IBPOrderer{} + result := ibpCRClient.Get().Namespace(namespace).Resource("ibporderers").Name(node.Name).Do(context.TODO()) + result.Into(updatenode) + + updatenode.Spec.Resources = ¤t.OrdererResources{ + Orderer: &corev1.ResourceRequirements{ + Requests: newResourceRequestsOrderer, + Limits: newResourceLimitsOrderer, + }, + GRPCProxy: &corev1.ResourceRequirements{ + Requests: newResourceRequestsProxy, + Limits: newResourceLimitsProxy, + }, + } + configOverride := &config.Orderer{ + Orderer: v2.Orderer{ + FileLedger: v1.FileLedger{ + Location: "/temp", + }, + }, + } + configBytes, err := json.Marshal(configOverride) + Expect(err).NotTo(HaveOccurred()) + updatenode.Spec.ConfigOverride = &runtime.RawExtension{Raw: configBytes} + + bytes, err := json.Marshal(updatenode) + Expect(err).NotTo(HaveOccurred()) + + result = ibpCRClient.Patch(types.MergePatchType).Namespace(namespace).Resource("ibporderers").Name(node.Name).Body(bytes).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + + // Wait for new deployment before querying deployment for updates + Eventually(func() bool { + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), node.NodeName, metav1.GetOptions{}) + if dep != nil { + if dep.Status.UpdatedReplicas == 1 && dep.Status.Conditions[0].Type == appsv1.DeploymentAvailable { + if dep.Spec.Template.Spec.Containers[0].Resources.Requests.Cpu().MilliValue() == newResourceRequestsOrderer.Cpu().MilliValue() { + return true + } + } + } + return false + }).Should(Equal(true)) + + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), node.NodeName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + updatedOrdererResources 
:= dep.Spec.Template.Spec.Containers[0].Resources + Expect(updatedOrdererResources.Requests).To(Equal(newResourceRequestsOrderer)) + Expect(updatedOrdererResources.Limits).To(Equal(newResourceLimitsOrderer)) + + updatedProxyResources := dep.Spec.Template.Spec.Containers[1].Resources + Expect(updatedProxyResources.Requests).To(Equal(newResourceRequestsProxy)) + Expect(updatedProxyResources.Limits).To(Equal(newResourceLimitsProxy)) + + By("updating the config map with new values from override", func() { + Eventually(func() bool { + cm, err := kclient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), orderer.Name+"node1-config", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + configBytes := cm.BinaryData["orderer.yaml"] + ordererConfig, err := config.ReadOrdererFromBytes(configBytes) + Expect(err).NotTo(HaveOccurred()) + + if ordererConfig.FileLedger.Location == "/temp" { + return true + } + + return false + }).Should(Equal(true)) + }) + } + }) + }) + + When("a deployment managed by operator is manually edited", func() { + var ( + err error + dep *appsv1.Deployment + ) + + BeforeEach(func() { + for _, node := range orderer1nodes { + Eventually(node.DeploymentExists).Should((Equal(true))) + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), node.NodeName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + } + }) + + It("restores states", func() { + for _, node := range orderer1nodes { + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), node.NodeName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + origRequests := dep.Spec.Template.Spec.Containers[0].Resources.Requests + dep.Spec.Template.Spec.Containers[0].Resources.Requests = map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("200M"), + } + + depBytes, err := json.Marshal(dep) + Expect(err).NotTo(HaveOccurred()) + + _, err = kclient.AppsV1().Deployments(namespace).Patch(context.TODO(), node.NodeName, types.MergePatchType, depBytes, metav1.PatchOptions{}) + Expect(util.IgnoreOutdatedResourceVersion(err)).NotTo(HaveOccurred()) + + // Wait for new deployment before querying deployment for updates + wait.Poll(500*time.Millisecond, 60*time.Second, func() (bool, error) { + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), node.NodeName, metav1.GetOptions{}) + if dep != nil { + if dep.Spec.Template.Spec.Containers[0].Resources.Requests.Cpu().MilliValue() == origRequests.Cpu().MilliValue() { + return true, nil + } + } + return false, nil + }) + + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), node.NodeName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + Expect(dep.Spec.Template.Spec.Containers[0].Resources.Requests).To(Equal(origRequests)) + } + }) + }) + }) + + Context("applying last instance of IBPOrderer CR, with channel-less config", func() { + + // NOTE: THIS COUNTER MUST BE EQUAL TO THE NUMBER OF It() ROUTINES IN THIS CONTEXT + checks_remaining := 2 + + // Set up the orderer before the FIRST It() of this context + BeforeEach(func() { + if orderer5 == nil { + orderer5, orderer5nodes = GetOrderer5() + err := helper.CreateOrderer(ibpCRClient, orderer5.CR) + Expect(err).NotTo(HaveOccurred()) + } + }) + + // Tear down the orderer after the LAST It() in this context + AfterEach(func() { + checks_remaining-- + if checks_remaining == 0 { + result := 
ibpCRClient.Delete().Namespace(namespace).Resource("ibporderers").Name(orderer5.Name).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + + orderer5 = nil + orderer5nodes = nil + } + }) + + It("creates a IBPOrderer custom resource", func() { + By("creating a pvc", func() { + for _, node := range orderer5nodes { + Eventually(node.PVCExists).Should((Equal(true))) + } + }) + + By("creating a service", func() { + for _, node := range orderer5nodes { + Eventually(node.ServiceExists).Should((Equal(true))) + } + }) + + By("creating a configmap", func() { + for _, node := range orderer5nodes { + Eventually(node.ConfigMapExists).Should((Equal(true))) + } + }) + + By("starting a ingress", func() { + for _, node := range orderer5nodes { + Eventually(node.IngressExists).Should((Equal(true))) + } + }) + + By("creating a deployment", func() { + for _, node := range orderer5nodes { + Eventually(node.DeploymentExists).Should((Equal(true))) + } + }) + + By("creating init secrets", func() { + for _, node := range orderer5nodes { + Eventually(node.allInitSecretsExist).Should((Equal(true))) + } + }) + + By("starting a pod", func() { + for _, node := range orderer5nodes { + Eventually(node.PodIsRunning).Should((Equal(true))) + } + }) + + By("creating config map that contains spec", func() { + for _, node := range orderer5nodes { + Eventually(func() bool { + _, err := kclient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), node.Name+"-spec", metav1.GetOptions{}) + if err != nil { + return false + } + return true + }).Should(Equal(true)) + } + }) + + By("setting the CR status to deployed when pod is running", func() { + for _, node := range orderer5nodes { + Eventually(node.pollForCRStatus).Should((Equal(current.Deployed))) + } + Eventually(orderer5.pollForCRStatus).Should((Equal(current.Deployed))) + }) + }) + + When("a deployment managed by operator is manually edited", func() { + var ( + err error + dep *appsv1.Deployment + ) + + BeforeEach(func() { + for _, node := range orderer5nodes { + Eventually(node.DeploymentExists).Should((Equal(true))) + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), node.NodeName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + } + }) + + It("restores states", func() { + for _, node := range orderer5nodes { + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), node.NodeName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + origRequests := dep.Spec.Template.Spec.Containers[0].Resources.Requests + dep.Spec.Template.Spec.Containers[0].Resources.Requests = map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("200M"), + } + + depBytes, err := json.Marshal(dep) + Expect(err).NotTo(HaveOccurred()) + + _, err = kclient.AppsV1().Deployments(namespace).Patch(context.TODO(), node.NodeName, types.MergePatchType, depBytes, metav1.PatchOptions{}) + Expect(util.IgnoreOutdatedResourceVersion(err)).NotTo(HaveOccurred()) + + // Wait for new deployment before querying deployment for updates + wait.Poll(500*time.Millisecond, 60*time.Second, func() (bool, error) { + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), node.NodeName, metav1.GetOptions{}) + if dep != nil { + if dep.Spec.Template.Spec.Containers[0].Resources.Requests.Cpu().MilliValue() == origRequests.Cpu().MilliValue() { + return true, nil + } + } + return false, nil + }) + + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), 
node.NodeName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + Expect(dep.Spec.Template.Spec.Containers[0].Resources.Requests).To(Equal(origRequests)) + } + }) + }) + }) + + Context("applying the second instance of IBPOrderer CR", func() { + var ( + err error + dep *appsv1.Deployment + ) + + // NOTE: THIS COUNTER MUST BE EQUAL TO THE NUMBER OF It() ROUTINES IN THIS CONTEXT + checks_remaining := 2 + + // Set up the orderer before the FIRST It() of this context + BeforeEach(func() { + if orderer2 == nil { + orderer2, orderer2nodes = GetOrderer2() + err := helper.CreateOrderer(ibpCRClient, orderer2.CR) + Expect(err).NotTo(HaveOccurred()) + } + }) + + // Tear down the orderer after the LAST It() in this context + AfterEach(func() { + checks_remaining-- + if checks_remaining == 0 { + result := ibpCRClient.Delete().Namespace(namespace).Resource("ibporderers").Name(orderer2.Name).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + + orderer2 = nil + orderer2nodes = nil + } + }) + + It("creates a second IBPOrderer custom resource", func() { + By("starting a pod", func() { + for _, node := range orderer2nodes { + Eventually(node.PodIsRunning).Should((Equal(true))) + } + }) + }) + + PIt("should find zone and region", func() { + for _, node := range orderer2nodes { + // Wait for new deployment before querying deployment for updates + wait.Poll(500*time.Millisecond, 60*time.Second, func() (bool, error) { + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), node.NodeName, metav1.GetOptions{}) + if dep != nil { + if dep.Status.UpdatedReplicas >= 1 && dep.Status.Conditions[0].Type == appsv1.DeploymentAvailable { + return true, nil + } + } + return false, nil + }) + + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), node.NodeName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("checking zone", func() { + Expect(orderer2.TestAffinityZone(dep)).To((Equal(true))) + }) + + By("checking region", func() { + Expect(orderer2.TestAffinityRegion(dep)).To((Equal(true))) + }) + } + }) + + It("adjust cluster size should not change number of orderers", func() { + By("increase number of nodes", func() { + orderer2.CR.Spec.ClusterSize = 5 + bytes, err := json.Marshal(orderer2.CR) + Expect(err).NotTo(HaveOccurred()) + + result := ibpCRClient.Patch(types.MergePatchType).Namespace(namespace).Resource("ibporderers").Name(orderer2.Name).Body(bytes).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + + Eventually(orderer2.NumberOfOrdererNodeDeployments).Should((Equal(3))) + }) + + By("reducing cluster size should not change the number of nodes", func() { + orderer2.CR.Spec.ClusterSize = 1 + bytes, err := json.Marshal(orderer2.CR) + Expect(err).NotTo(HaveOccurred()) + + result := ibpCRClient.Patch(types.MergePatchType).Namespace(namespace).Resource("ibporderers").Name(orderer2.Name).Body(bytes).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + + Eventually(orderer2.NumberOfOrdererNodeDeployments).Should((Equal(3))) + + secretResult := ibpCRClient.Get().Namespace(namespace).Resource("secrets").Name(fmt.Sprintf("ecert-%s%s%d-signcert", orderer2.Name, baseorderer.NODE, 3)).Do(context.TODO()) + Expect(secretResult.Error()).To(HaveOccurred()) + + serviceResult := ibpCRClient.Get().Namespace(namespace).Resource("services").Name(fmt.Sprintf("%s%s%dservice", orderer2.Name, baseorderer.NODE, 3)).Do(context.TODO()) + Expect(serviceResult.Error()).To(HaveOccurred()) + + cm := 
ibpCRClient.Get().Namespace(namespace).Resource("configmaps").Name(fmt.Sprintf("%s-%s%d-cm", orderer2.Name, baseorderer.NODE, 3)).Do(context.TODO()) + Expect(cm.Error()).To(HaveOccurred()) + + pvc := ibpCRClient.Get().Namespace(namespace).Resource("persistentvolumeclaims").Name(fmt.Sprintf("%s-%s%d-pvc", orderer2.Name, baseorderer.NODE, 3)).Do(context.TODO()) + Expect(pvc.Error()).To(HaveOccurred()) + }) + }) + }) + + Context("applying incorrectly configured third instance of IBPOrderer CR", func() { + + // NOTE: THIS COUNTER MUST BE EQUAL TO THE NUMBER OF It() ROUTINES IN THIS CONTEXT + checks_remaining := 1 + + // Set up the orderer before the FIRST It() of this context + BeforeEach(func() { + if orderer3 == nil { + orderer3, orderer3nodes = GetOrderer3() + err := helper.CreateOrderer(ibpCRClient, orderer3.CR) + Expect(err).NotTo(HaveOccurred()) + } + }) + + // Tear down the orderer after the LAST It() in this context + AfterEach(func() { + checks_remaining-- + if checks_remaining == 0 { + result := ibpCRClient.Delete().Namespace(namespace).Resource("ibporderers").Name(orderer3.Name).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + + orderer3 = nil + orderer3nodes = nil + } + }) + + It("should set the CR status to error", func() { + Eventually(orderer3.pollForCRStatus).Should((Equal(current.Error))) + + crStatus := ¤t.IBPOrderer{} + result := ibpCRClient.Get().Namespace(namespace).Resource("ibporderers").Name(orderer3.Name).Do(context.TODO()) + result.Into(crStatus) + + Expect(crStatus.Status.Message).To(ContainSubstring("Number of Cluster Node Locations does not match cluster size")) + }) + }) + + Context("deleting all child nodes should delete parent of fourth instance of IBPOrderer CR", func() { + + // NOTE: THIS COUNTER MUST BE EQUAL TO THE NUMBER OF It() ROUTINES IN THIS CONTEXT + checks_remaining := 3 + + // Set up the orderer before the FIRST It() of this context + BeforeEach(func() { + if orderer4 == nil { + orderer4, orderer4nodes = GetOrderer4() + err := helper.CreateOrderer(ibpCRClient, orderer4.CR) + Expect(err).NotTo(HaveOccurred()) + } + }) + + // Tear down the orderer after the LAST It() in this context + AfterEach(func() { + checks_remaining-- + if checks_remaining == 0 { + // Orderer4 will have been deleted during the test context - expect an error on get() + result := ibpCRClient.Delete().Namespace(namespace).Resource("ibporderers").Name(orderer4.Name).Do(context.TODO()) + Expect(result.Error()).To(HaveOccurred()) + + orderer4 = nil + orderer4nodes = nil + } + }) + + It("creates a fourth IBPOrderer custom resource", func() { + By("starting a pod", func() { + for _, node := range orderer4nodes { + Eventually(node.PodIsRunning).Should((Equal(true))) + } + }) + }) + + It("does not delete the parent if few child nodes are deleted", func() { + node := orderer4nodes[0] + result := ibpCRClient.Delete().Namespace(namespace).Resource("ibporderers").Name(node.Name).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + + node = orderer4nodes[1] + result = ibpCRClient.Delete().Namespace(namespace).Resource("ibporderers").Name(node.Name).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + + // Wait for second node to be deleted + err := wait.Poll(500*time.Millisecond, 30*time.Second, func() (bool, error) { + result := ibpCRClient.Get().Namespace(namespace).Resource("ibporderers").Name(node.Name).Do(context.TODO()) + + if result.Error() == nil { + return false, nil + } + return true, nil + }) + Expect(err).NotTo(HaveOccurred()) + + 
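The deletion checks above rely on wait.Poll from k8s.io/apimachinery: the condition function is invoked once per interval and returns (done, error), where (true, nil) stops the loop, a non-nil error aborts immediately, and wait.ErrWaitTimeout is reported if the timeout elapses first. Below is a minimal sketch of that pattern, not part of the patch; the waitUntilGone helper and the fake lookup standing in for the real client call are hypothetical.

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitUntilGone polls until exists() reports the named resource has disappeared,
// using the same interval and timeout as the child-node deletion check above.
func waitUntilGone(exists func(name string) bool, name string) error {
	return wait.Poll(500*time.Millisecond, 30*time.Second, func() (bool, error) {
		if exists(name) {
			return false, nil // still there, keep polling
		}
		return true, nil // gone, stop polling
	})
}

func main() {
	calls := 0
	// Fake lookup: pretend the resource disappears after three checks.
	exists := func(string) bool { calls++; return calls <= 3 }

	if err := waitUntilGone(exists, "ibporderer4node2"); err != nil {
		fmt.Println("timed out waiting for deletion:", err)
		return
	}
	fmt.Println("resource deleted")
}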
parent := ¤t.IBPOrderer{} + result = ibpCRClient.Get().Namespace(namespace).Resource("ibporderers").Name(orderer4.CR.GetName()).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + err = result.Into(parent) + Expect(err).NotTo(HaveOccurred()) + }) + + It("deletes the parent if all child nodes are deleted", func() { + node := orderer4nodes[2] + result := ibpCRClient.Delete().Namespace(namespace).Resource("ibporderers").Name(node.Name).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + + err := wait.Poll(500*time.Millisecond, 30*time.Second, func() (bool, error) { + parent := ¤t.IBPOrderer{} + result := ibpCRClient.Get().Namespace(namespace).Resource("ibporderers").Name(orderer4.CR.Name).Do(context.TODO()) + if result.Error() == nil { + err := result.Into(parent) + Expect(err).NotTo(HaveOccurred()) + return false, nil + } + return true, nil + }) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("pod restart", func() { + var ( + orderernode *Orderer + ) + + BeforeEach(func() { + _, nodes := GetOrderer() + orderernode = &nodes[0] + }) + + Context("should not trigger deployment restart if config overrides not updated", func() { + var ( + oldPodName string + ) + + BeforeEach(func() { + Eventually(orderernode.PodIsRunning).Should((Equal(true))) + + pods := orderernode.GetPods() + if len(pods) > 0 { + oldPodName = pods[0].Name + } + }) + + It("does not restart the orderer node pod", func() { + Eventually(orderernode.PodIsRunning).Should((Equal(true))) + + Eventually(func() bool { + pods := orderernode.GetPods() + if len(pods) != 1 { + return false + } + + newPodName := pods[0].Name + if newPodName == oldPodName { + return true + } + + return false + }).Should(Equal(true)) + }) + }) + + Context("should trigger deployment restart if config overrides is updated", func() { + var ( + oldPodName string + ) + + BeforeEach(func() { + Eventually(orderernode.PodIsRunning).Should((Equal(true))) + pods := orderernode.GetPods() + Expect(len(pods)).To(Equal(1)) + oldPodName = pods[0].Name + + configOverride := &config.Orderer{ + Orderer: v2.Orderer{ + FileLedger: v1.FileLedger{ + Location: "/temp1", + }, + }, + } + configBytes, err := json.Marshal(configOverride) + Expect(err).NotTo(HaveOccurred()) + orderernode.CR.Spec.ConfigOverride = &runtime.RawExtension{Raw: configBytes} + + bytes, err := json.Marshal(orderernode.CR) + Expect(err).NotTo(HaveOccurred()) + + result := ibpCRClient.Patch(types.MergePatchType).Namespace(namespace).Resource("ibporderers").Name(orderernode.Name).Body(bytes).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + }) + + It("restarts the pod", func() { + Eventually(orderernode.PodIsRunning).Should((Equal(false))) + Eventually(orderernode.PodIsRunning).Should((Equal(true))) + + Eventually(func() bool { + pods := orderernode.GetPods() + if len(pods) != 1 { + return false + } + + newPodName := pods[0].Name + if newPodName == oldPodName { + return false + } + + return true + }).Should(Equal(true)) + }) + }) + }) + + Context("delete crs", func() { + It("should delete IBPOrderer CR", func() { + By("deleting the first instance of IBPOrderer CR", func() { + result := ibpCRClient.Delete().Namespace(namespace).Resource("ibporderers").Name(orderer.Name).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + }) + }) + }) + }) +}) + +func GetOrderer() (*Orderer, []Orderer) { + name := "ibporderer" + configOverride := &config.Orderer{ + Orderer: v2.Orderer{ + General: v2.General{ + ListenPort: uint16(7052), + }, + }, + } + 
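The config override built just above is carried in the CR spec as a runtime.RawExtension: the typed struct is JSON-marshalled into Raw when the spec is constructed, and unmarshalled back out when the tests inspect the effective configuration. The following is a minimal round-trip sketch under that assumption and is not part of the patch; overrideDoc is a hypothetical stand-in for the operator's config.Orderer type, modelling only the General.ListenPort field exercised here.

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
)

// overrideDoc is a hypothetical stand-in for config.Orderer.
type overrideDoc struct {
	General struct {
		ListenPort uint16 `json:"listenPort,omitempty"`
	} `json:"general,omitempty"`
}

func main() {
	in := overrideDoc{}
	in.General.ListenPort = 7052

	// Marshal the typed override and embed it as raw JSON, mirroring
	// ConfigOverride: &runtime.RawExtension{Raw: configBytes} in the specs.
	raw, err := json.Marshal(in)
	if err != nil {
		panic(err)
	}
	ext := &runtime.RawExtension{Raw: raw}

	// On the way back out, decode Raw into the same typed struct to inspect it.
	out := overrideDoc{}
	if err := json.Unmarshal(ext.Raw, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.General.ListenPort) // prints 7052
}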
configBytes, err := json.Marshal(configOverride) + Expect(err).NotTo(HaveOccurred()) + cr := ¤t.IBPOrderer{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: current.IBPOrdererSpec{ + License: current.License{ + Accept: true, + }, + OrdererType: "etcdraft", + SystemChannelName: "testchainid", + OrgName: "orderermsp", + MSPID: "orderermsp", + ImagePullSecrets: []string{"regcred"}, + GenesisProfile: "Initial", + Domain: integration.TestAutomation1IngressDomain, + Images: ¤t.OrdererImages{ + GRPCWebImage: integration.GrpcwebImage, + GRPCWebTag: integration.GrpcwebTag, + OrdererImage: integration.OrdererImage, + OrdererTag: integration.OrdererTag, + OrdererInitImage: integration.InitImage, + OrdererInitTag: integration.InitTag, + }, + ClusterSecret: []*current.SecretSpec{ + ¤t.SecretSpec{ + MSP: testMSPSpec, + }, + }, + Resources: ¤t.OrdererResources{ + Orderer: &corev1.ResourceRequirements{ + Requests: defaultRequestsOrderer, + Limits: defaultLimitsOrderer, + }, + GRPCProxy: &corev1.ResourceRequirements{ + Requests: defaultRequestsProxy, + Limits: defaultLimitsProxy, + }, + }, + ConfigOverride: &runtime.RawExtension{Raw: configBytes}, + DisableNodeOU: ¤t.BoolTrue, + FabricVersion: integration.FabricVersion + "-1", + }, + } + cr.Name = name + + nodes := []Orderer{ + Orderer{ + Name: name + "node1", + CR: cr.DeepCopy(), + NodeName: fmt.Sprintf("%s%s%d", name, baseorderer.NODE, 1), + NativeResourcePoller: integration.NativeResourcePoller{ + Name: name + "node1", + Namespace: namespace, + Client: kclient, + }, + }, + } + + nodes[0].CR.ObjectMeta.Name = name + "node1" + + return &Orderer{ + Name: name, + CR: cr, + NodeName: fmt.Sprintf("%s-%s%d", name, baseorderer.NODE, 1), + NativeResourcePoller: integration.NativeResourcePoller{ + Name: name, + Namespace: namespace, + Client: kclient, + }, + }, nodes +} + +func GetOrderer2() (*Orderer, []Orderer) { + name := "ibporderer2" + cr := ¤t.IBPOrderer{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: current.IBPOrdererSpec{ + License: current.License{ + Accept: true, + }, + OrdererType: "etcdraft", + ClusterSize: 3, + SystemChannelName: "channel1", + OrgName: "orderermsp", + MSPID: "orderermsp", + ImagePullSecrets: []string{"regcred"}, + Domain: integration.TestAutomation1IngressDomain, + GenesisProfile: "Initial", + Images: ¤t.OrdererImages{ + GRPCWebImage: integration.GrpcwebImage, + GRPCWebTag: integration.GrpcwebTag, + OrdererImage: integration.OrdererImage, + OrdererTag: integration.OrdererTag, + OrdererInitImage: integration.InitImage, + OrdererInitTag: integration.InitTag, + }, + ClusterSecret: []*current.SecretSpec{ + ¤t.SecretSpec{ + MSP: testMSPSpec, + }, + ¤t.SecretSpec{ + MSP: testMSPSpec, + }, + ¤t.SecretSpec{ + MSP: testMSPSpec, + }, + }, + Zone: "select", + Region: "select", + Resources: ¤t.OrdererResources{ + Orderer: &corev1.ResourceRequirements{ + Requests: defaultRequestsOrderer, + Limits: defaultLimitsOrderer, + }, + GRPCProxy: &corev1.ResourceRequirements{ + Requests: defaultRequestsProxy, + Limits: defaultLimitsProxy, + }, + }, + DisableNodeOU: ¤t.BoolTrue, + FabricVersion: integration.FabricVersion + "-1", + }, + } + cr.Name = name + + nodes := []Orderer{ + Orderer{ + Name: name + "node1", + CR: cr.DeepCopy(), + NodeName: fmt.Sprintf("%s%s%d", name, baseorderer.NODE, 1), + NativeResourcePoller: integration.NativeResourcePoller{ + Name: name + "node1", + Namespace: namespace, + Client: kclient, + }, + }, + Orderer{ + Name: name + "node2", + CR: cr.DeepCopy(), + 
NodeName: fmt.Sprintf("%s%s%d", name, baseorderer.NODE, 1), + NativeResourcePoller: integration.NativeResourcePoller{ + Name: name + "node2", + Namespace: namespace, + Client: kclient, + }, + }, + Orderer{ + Name: name + "node3", + CR: cr.DeepCopy(), + NodeName: fmt.Sprintf("%s%s%d", name, baseorderer.NODE, 1), + NativeResourcePoller: integration.NativeResourcePoller{ + Name: name + "node3", + Namespace: namespace, + Client: kclient, + }, + }, + } + + nodes[0].CR.ObjectMeta.Name = name + "node1" + nodes[1].CR.ObjectMeta.Name = name + "node2" + nodes[2].CR.ObjectMeta.Name = name + "node3" + + return &Orderer{ + Name: name, + CR: cr, + NodeName: fmt.Sprintf("%s-%s%d", name, baseorderer.NODE, 1), + NativeResourcePoller: integration.NativeResourcePoller{ + Name: name, + Namespace: namespace, + Client: kclient, + }, + }, nodes +} + +func GetOrderer3() (*Orderer, []Orderer) { + name := "ibporderer3" + cr := ¤t.IBPOrderer{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: current.IBPOrdererSpec{ + License: current.License{ + Accept: true, + }, + OrdererType: "etcdraft", + ClusterSize: 1, + SystemChannelName: "channel1", + OrgName: "ordererorg", + MSPID: "orderermsp", + ImagePullSecrets: []string{"regcred"}, + Domain: integration.TestAutomation1IngressDomain, + GenesisProfile: "Initial", + Images: ¤t.OrdererImages{ + GRPCWebImage: integration.GrpcwebImage, + GRPCWebTag: integration.GrpcwebTag, + OrdererImage: integration.OrdererImage, + OrdererTag: integration.OrdererTag, + OrdererInitImage: integration.InitImage, + OrdererInitTag: integration.InitTag, + }, + Secret: ¤t.SecretSpec{ + MSP: testMSPSpec, + }, + ClusterLocation: []current.IBPOrdererClusterLocation{ + current.IBPOrdererClusterLocation{ + Zone: "dal1", + Region: "us-south1", + }, + current.IBPOrdererClusterLocation{ + Zone: "dal2", + Region: "us-south2", + }, + }, + DisableNodeOU: ¤t.BoolTrue, + FabricVersion: integration.FabricVersion + "-1", + }, + } + cr.Name = name + + nodes := []Orderer{ + Orderer{ + Name: name + "node1", + CR: cr.DeepCopy(), + NodeName: fmt.Sprintf("%s%s%d", name, baseorderer.NODE, 1), + NativeResourcePoller: integration.NativeResourcePoller{ + Name: name + "node1", + Namespace: namespace, + Client: kclient, + }, + }, + } + + nodes[0].CR.ObjectMeta.Name = name + "node1" + + return &Orderer{ + Name: name, + CR: cr, + NodeName: fmt.Sprintf("%s-%s%d", name, baseorderer.NODE, 1), + NativeResourcePoller: integration.NativeResourcePoller{ + Name: name, + Namespace: namespace, + Client: kclient, + }, + }, nodes +} + +func GetOrderer4() (*Orderer, []Orderer) { + name := "ibporderer4" + cr := ¤t.IBPOrderer{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: current.IBPOrdererSpec{ + License: current.License{ + Accept: true, + }, + OrdererType: "etcdraft", + ClusterSize: 3, + SystemChannelName: "channel1", + OrgName: "orderermsp", + MSPID: "orderermsp", + ImagePullSecrets: []string{"regcred"}, + Domain: integration.TestAutomation1IngressDomain, + GenesisProfile: "Initial", + Images: ¤t.OrdererImages{ + GRPCWebImage: integration.GrpcwebImage, + GRPCWebTag: integration.GrpcwebTag, + OrdererImage: integration.OrdererImage, + OrdererTag: integration.OrdererTag, + OrdererInitImage: integration.InitImage, + OrdererInitTag: integration.InitTag, + }, + ClusterSecret: []*current.SecretSpec{ + ¤t.SecretSpec{ + MSP: testMSPSpec, + }, + ¤t.SecretSpec{ + MSP: testMSPSpec, + }, + ¤t.SecretSpec{ + MSP: testMSPSpec, + }, + }, + Zone: "select", + Region: "select", + 
Resources: ¤t.OrdererResources{ + Orderer: &corev1.ResourceRequirements{ + Requests: defaultRequestsOrderer, + Limits: defaultLimitsOrderer, + }, + GRPCProxy: &corev1.ResourceRequirements{ + Requests: defaultRequestsProxy, + Limits: defaultLimitsProxy, + }, + }, + DisableNodeOU: ¤t.BoolTrue, + FabricVersion: integration.FabricVersion + "-1", + }, + } + cr.Name = name + + nodes := []Orderer{ + Orderer{ + Name: name + "node1", + CR: cr.DeepCopy(), + NodeName: fmt.Sprintf("%s%s%d", name, baseorderer.NODE, 1), + NativeResourcePoller: integration.NativeResourcePoller{ + Name: name + "node1", + Namespace: namespace, + Client: kclient, + }, + }, + Orderer{ + Name: name + "node2", + CR: cr.DeepCopy(), + NodeName: fmt.Sprintf("%s%s%d", name, baseorderer.NODE, 1), + NativeResourcePoller: integration.NativeResourcePoller{ + Name: name + "node2", + Namespace: namespace, + Client: kclient, + }, + }, + Orderer{ + Name: name + "node3", + CR: cr.DeepCopy(), + NodeName: fmt.Sprintf("%s%s%d", name, baseorderer.NODE, 1), + NativeResourcePoller: integration.NativeResourcePoller{ + Name: name + "node3", + Namespace: namespace, + Client: kclient, + }, + }, + } + + nodes[0].CR.ObjectMeta.Name = name + "node1" + nodes[1].CR.ObjectMeta.Name = name + "node2" + nodes[2].CR.ObjectMeta.Name = name + "node3" + + return &Orderer{ + Name: name, + CR: cr, + NodeName: fmt.Sprintf("%s-%s%d", name, baseorderer.NODE, 1), + NativeResourcePoller: integration.NativeResourcePoller{ + Name: name, + Namespace: namespace, + Client: kclient, + }, + }, nodes +} + +func GetOrderer5() (*Orderer, []Orderer) { + name := "ibporderer5" + cr := ¤t.IBPOrderer{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: current.IBPOrdererSpec{ + License: current.License{ + Accept: true, + }, + OrdererType: "etcdraft", + SystemChannelName: "testchainid", + UseChannelLess: ¤t.BoolTrue, + OrgName: "orderermsp", + MSPID: "orderermsp", + ImagePullSecrets: []string{"regcred"}, + GenesisProfile: "Initial", + Domain: integration.TestAutomation1IngressDomain, + Images: ¤t.OrdererImages{ + GRPCWebImage: integration.GrpcwebImage, + GRPCWebTag: integration.GrpcwebTag, + OrdererImage: integration.OrdererImage, + OrdererTag: integration.Orderer24Tag, + OrdererInitImage: integration.InitImage, + OrdererInitTag: integration.InitTag, + }, + ClusterSecret: []*current.SecretSpec{ + ¤t.SecretSpec{ + MSP: testMSPSpec, + }, + }, + Resources: ¤t.OrdererResources{ + Orderer: &corev1.ResourceRequirements{ + Requests: defaultRequestsOrderer, + Limits: defaultLimitsOrderer, + }, + GRPCProxy: &corev1.ResourceRequirements{ + Requests: defaultRequestsProxy, + Limits: defaultLimitsProxy, + }, + }, + DisableNodeOU: ¤t.BoolTrue, + FabricVersion: integration.FabricVersion24 + "-1", + }, + } + cr.Name = name + + nodes := []Orderer{ + Orderer{ + Name: name + "node1", + CR: cr.DeepCopy(), + NodeName: fmt.Sprintf("%s%s%d", name, baseorderer.NODE, 1), + NativeResourcePoller: integration.NativeResourcePoller{ + Name: name + "node1", + Namespace: namespace, + Client: kclient, + }, + }, + } + + nodes[0].CR.ObjectMeta.Name = name + "node1" + + return &Orderer{ + Name: name, + CR: cr, + NodeName: fmt.Sprintf("%s-%s%d", name, baseorderer.NODE, 1), + NativeResourcePoller: integration.NativeResourcePoller{ + Name: name, + Namespace: namespace, + Client: kclient, + }, + }, nodes +} + +type Orderer struct { + Name string + CR *current.IBPOrderer + NodeName string + integration.NativeResourcePoller +} + +func (orderer *Orderer) pollForCRStatus() current.IBPCRStatusType 
{ + crStatus := ¤t.IBPOrderer{} + + result := ibpCRClient.Get().Namespace(namespace).Resource("ibporderers").Name(orderer.Name).Do(context.TODO()) + result.Into(crStatus) + + return crStatus.Status.Type +} + +func (orderer *Orderer) allInitSecretsExist() bool { + prefix := "ecert-" + orderer.NodeName + name := prefix + "-admincerts" + _, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return false + } + + name = prefix + "-cacerts" + _, err = kclient.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return false + } + + name = prefix + "-signcert" + _, err = kclient.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return false + } + + name = prefix + "-keystore" + _, err = kclient.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return false + } + + prefix = "tls-" + orderer.NodeName + name = prefix + "-cacerts" + _, err = kclient.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return false + } + + name = prefix + "-signcert" + _, err = kclient.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return false + } + + name = prefix + "-keystore" + _, err = kclient.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return false + } + + return true +} + +func (o *Orderer) DeploymentExists() bool { + dep, err := kclient.AppsV1().Deployments(namespace).Get(context.TODO(), o.NodeName, metav1.GetOptions{}) + if err == nil && dep != nil { + return true + } + + return false +} diff --git a/integration/peer/peer_suite_test.go b/integration/peer/peer_suite_test.go new file mode 100644 index 00000000..75ded632 --- /dev/null +++ b/integration/peer/peer_suite_test.go @@ -0,0 +1,170 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package peer_test + +import ( + "context" + "encoding/base64" + "fmt" + "os" + "path/filepath" + "strings" + "testing" + "time" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/integration" + "github.com/IBM-Blockchain/fabric-operator/integration/helper" + ibpclient "github.com/IBM-Blockchain/fabric-operator/pkg/client" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/onsi/gomega/gexec" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" +) + +func TestPeer(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Peer Suite") +} + +const ( + FabricBinaryVersion = "2.2.3" + FabricCABinaryVersion = "1.5.1" + peerAdminUsername = "peer-admin" + peerUsername = "peer" +) + +var ( + namespaceSuffix = "peer" + operatorDeploymentFile = "../../testdata/deploy/operator.yaml" + + namespace string + kclient *kubernetes.Clientset + ibpCRClient *ibpclient.IBPClient + testFailed bool + wd string +) + +var _ = BeforeSuite(func() { + SetDefaultEventuallyTimeout(240 * time.Second) + SetDefaultEventuallyPollingInterval(time.Second) + + var err error + + wd, err = os.Getwd() + Expect(err).NotTo(HaveOccurred()) + fmt.Fprintf(GinkgoWriter, "Working directory: %s\n", wd) + + cfg := &integration.Config{ + OperatorDeployment: operatorDeploymentFile, + OperatorServiceAccount: "../../config/rbac/service_account.yaml", + OperatorRole: "../../config/rbac/role.yaml", + OperatorRoleBinding: "../../config/rbac/role_binding.yaml", + OrdererSecret: "../../testdata/deploy/orderer/secret.yaml", + PeerSecret: "../../testdata/deploy/peer/secret.yaml", + ConsoleTLSSecret: "../../testdata/deploy/console/tlssecret.yaml", + } + + namespace, kclient, ibpCRClient, err = integration.Setup(GinkgoWriter, cfg, namespaceSuffix, "") + Expect(err).NotTo(HaveOccurred()) + + downloadBinaries() +}) + +var _ = AfterSuite(func() { + + if strings.ToLower(os.Getenv("SAVE_TEST")) == "true" { + return + } + + err := integration.Cleanup(GinkgoWriter, kclient, namespace) + Expect(err).NotTo(HaveOccurred()) +}) + +func CreatePeer(peer *Peer) { + result := ibpCRClient.Post().Namespace(namespace).Resource("ibppeers").Body(peer.CR).Do(context.TODO()) + err := result.Error() + if !k8serrors.IsAlreadyExists(err) { + Expect(err).NotTo(HaveOccurred()) + } +} + +type Peer struct { + Name string + CR *current.IBPPeer + integration.NativeResourcePoller +} + +func (peer *Peer) pollForCRStatus() current.IBPCRStatusType { + crStatus := ¤t.IBPPeer{} + + result := ibpCRClient.Get().Namespace(namespace).Resource("ibppeers").Name(peer.Name).Do(context.TODO()) + result.Into(crStatus) + + return crStatus.Status.Type +} + +func (peer *Peer) ingressExists() bool { + opts := metav1.ListOptions{ + LabelSelector: fmt.Sprintf("app=%s", peer.Name), + } + ingressList, err := kclient.NetworkingV1().Ingresses(namespace).List(context.TODO(), opts) + if err != nil { + return false + } + for _, ingress := range ingressList.Items { + if strings.HasPrefix(ingress.Name, peer.Name) { + return true + } + } + + return false +} + +func (peer *Peer) getPVCStorageFromSpec(name string) string { + pvc, err := kclient.CoreV1().PersistentVolumeClaims(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return "" + } + + storage := pvc.Spec.Resources.Requests[corev1.ResourceStorage] + + return storage.String() +} + +func (peer *Peer) checkAdminCertUpdate() string { + secretName := fmt.Sprintf("%s-%s-%s", "ecert", peer.Name, "admincerts") + sec, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), secretName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + certBytes := sec.Data["admincert-0.pem"] + str := base64.StdEncoding.EncodeToString(certBytes) + return str +} + +func downloadBinaries() { + os.Setenv("FABRIC_VERSION", FabricBinaryVersion) + 
os.Setenv("FABRIC_CA_VERSION", FabricCABinaryVersion) + sess, err := helper.StartSession(helper.GetCommand(filepath.Join(wd, "../../scripts/download_binaries.sh")), "Download Binaries") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) +} diff --git a/integration/peer/peer_test.go b/integration/peer/peer_test.go new file mode 100644 index 00000000..a735115b --- /dev/null +++ b/integration/peer/peer_test.go @@ -0,0 +1,906 @@ +//go:build !pkcs11 +// +build !pkcs11 + +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package peer_test + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "time" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/integration" + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/peer/v1" + v2 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/peer/v2" + config "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer/config/v2" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "sigs.k8s.io/yaml" +) + +const ( + adminCert = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNUakNDQWZXZ0F3SUJBZ0lVWHdiSXdVeXBmZE1WU1dZU24zWFBFcFZhd2tRd0NnWUlLb1pJemowRUF3SXcKYURFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJrd0Z3WURWUVFERXhCbVlXSnlhV010ClkyRXRjMlZ5ZG1WeU1CNFhEVEU1TVRFeE1qRTJNRGd3TUZvWERUSXdNVEV4TVRFMk1UTXdNRm93WHpFTE1Ba0cKQTFVRUJoTUNWVk14RnpBVkJnTlZCQWdURGs1dmNuUm9JRU5oY205c2FXNWhNUlF3RWdZRFZRUUtFd3RJZVhCbApjbXhsWkdkbGNqRVBNQTBHQTFVRUN4TUdZMnhwWlc1ME1SQXdEZ1lEVlFRREV3ZHZjbVJsY21WeU1Ga3dFd1lICktvWkl6ajBDQVFZSUtvWkl6ajBEQVFjRFFnQUV2S2RXNytpVVYxbVB3N0J3S2FESkNYVmpha2dqTDhwWCtWaHcKaENLSkNLeXE4Vis4U29tK1AyYzBXdExxbytFU1dVWENKNFJiN0pyOWIzZVc2SmplaHFPQmhUQ0JnakFPQmdOVgpIUThCQWY4RUJBTUNCNEF3REFZRFZSMFRBUUgvQkFJd0FEQWRCZ05WSFE0RUZnUVVSUW4yemN2b3hUUE1rV1JPClZxaG9DL293YXZnd0h3WURWUjBqQkJnd0ZvQVVTUU9ZL0Z5YnNXcTlIWEo3c296aUFyLzhtQkV3SWdZRFZSMFIKQkJzd0dZSVhVMkZoWkhNdFRXRmpRbTl2YXkxUWNtOHViRzlqWVd3d0NnWUlLb1pJemowRUF3SURSd0F3UkFJZwpCNEZmM1dUOWYxcWRjaXBUUzJ6dXFWVDl5WUc1S0dYWmpTN0cyaHZrd0JJQ0lHMXRHb0FkZzNoVWk2TkpyblFJClhaRXFOcWlJWmhPL2hPRmd1emE0VUpZaQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==" + signCert = 
"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNUekNDQWZXZ0F3SUJBZ0lVQWNnakVkOHBkOE43Vjg0YmFleG4yQzU0dWtzd0NnWUlLb1pJemowRUF3SXcKYURFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJrd0Z3WURWUVFERXhCbVlXSnlhV010ClkyRXRjMlZ5ZG1WeU1CNFhEVEU1TVRFeE1qRTRNell3TUZvWERUSTBNVEV4TURFNE5ERXdNRm93WHpFTE1Ba0cKQTFVRUJoTUNWVk14RnpBVkJnTlZCQWdURGs1dmNuUm9JRU5oY205c2FXNWhNUlF3RWdZRFZRUUtFd3RJZVhCbApjbXhsWkdkbGNqRVBNQTBHQTFVRUN4TUdZMnhwWlc1ME1SQXdEZ1lEVlFRREV3ZHZjbVJsY21WeU1Ga3dFd1lICktvWkl6ajBDQVFZSUtvWkl6ajBEQVFjRFFnQUU2NFJwK1pvVnYyaTg0cE5KUUFNUHJpenJmZVlNT2Y0UnZ1eHkKNHZOUU1Pd3JEemlIZkFLTnZmdUJlbDhpQ2dndHRXM2paZTVkSEFZaFVIS2Ryb3FodmFPQmhUQ0JnakFPQmdOVgpIUThCQWY4RUJBTUNCNEF3REFZRFZSMFRBUUgvQkFJd0FEQWRCZ05WSFE0RUZnUVVWakl3Y1YwYXRNZmZWV1E5CnhtenpXVG9uYmlJd0h3WURWUjBqQkJnd0ZvQVVTUU9ZL0Z5YnNXcTlIWEo3c296aUFyLzhtQkV3SWdZRFZSMFIKQkJzd0dZSVhVMkZoWkhNdFRXRmpRbTl2YXkxUWNtOHViRzlqWVd3d0NnWUlLb1pJemowRUF3SURTQUF3UlFJaApBUGE4Y3VjL3QvOW45ZDZlSHZoUWdialNBK1k2dytERW1ka2RpdnJHaGE5RUFpQXdTZStlVGdsQWJYQVNoTnhwCkJpR0Rjc2IwZ1pmRmhQd1pIN1VnQW1IQjN3PT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=" + certKey = "LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JR0hBZ0VBTUJNR0J5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEJHMHdhd0lCQVFRZ1p2VWRsUVZ6QlVSc3I2STMKZEVvd0ZlVGkvVkNLZVZqMmFwN2x3QWNYSzJLaFJBTkNBQVRyaEduNW1oVy9hTHppazBsQUF3K3VMT3Q5NWd3NQovaEcrN0hMaTgxQXc3Q3NQT0lkOEFvMjkrNEY2WHlJS0NDMjFiZU5sN2wwY0JpRlFjcDJ1aXFHOQotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg==" + caCert = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNGakNDQWIyZ0F3SUJBZ0lVZi84bk94M2NqM1htVzNDSUo1L0Q1ejRRcUVvd0NnWUlLb1pJemowRUF3SXcKYURFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJrd0Z3WURWUVFERXhCbVlXSnlhV010ClkyRXRjMlZ5ZG1WeU1CNFhEVEU1TVRBek1ERTNNamd3TUZvWERUTTBNVEF5TmpFM01qZ3dNRm93YURFTE1Ba0cKQTFVRUJoTUNWVk14RnpBVkJnTlZCQWdURGs1dmNuUm9JRU5oY205c2FXNWhNUlF3RWdZRFZRUUtFd3RJZVhCbApjbXhsWkdkbGNqRVBNQTBHQTFVRUN4TUdSbUZpY21sak1Sa3dGd1lEVlFRREV4Qm1ZV0p5YVdNdFkyRXRjMlZ5CmRtVnlNRmt3RXdZSEtvWkl6ajBDQVFZSUtvWkl6ajBEQVFjRFFnQUVSbzNmbUc2UHkyUHd6cUMwNnFWZDlFOFgKZ044eldqZzFMb3lnMmsxdkQ4MXY1dENRRytCTVozSUJGQnI2VTRhc0tZTUREakd6TElERmdUUTRjVDd1VktORgpNRU13RGdZRFZSMFBBUUgvQkFRREFnRUdNQklHQTFVZEV3RUIvd1FJTUFZQkFmOENBUUV3SFFZRFZSME9CQllFCkZFa0RtUHhjbTdGcXZSMXllN0tNNGdLLy9KZ1JNQW9HQ0NxR1NNNDlCQU1DQTBjQU1FUUNJRC92QVFVSEh2SWwKQWZZLzM5UWdEU2ltTWpMZnhPTG44NllyR1EvWHpkQVpBaUFpUmlyZmlMdzVGbXBpRDhtYmlmRjV4bzdFUzdqNApaUWQyT0FUNCt5OWE0Zz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K" +) + +type CoreConfig interface { + ToBytes() ([]byte, error) +} + +var ( + defaultRequestsPeer = corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("10m"), + corev1.ResourceMemory: resource.MustParse("20M"), + corev1.ResourceEphemeralStorage: resource.MustParse("100M"), + } + + defaultLimitsPeer = corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("200M"), + corev1.ResourceEphemeralStorage: resource.MustParse("1G"), + } + + defaultRequestsFluentd = corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("10m"), + corev1.ResourceMemory: resource.MustParse("20M"), + corev1.ResourceEphemeralStorage: resource.MustParse("100M"), + } + + defaultLimitsFluentd = corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("200M"), + corev1.ResourceEphemeralStorage: resource.MustParse("1G"), + } + + defaultRequestsCouchdb = corev1.ResourceList{ + 
corev1.ResourceCPU: resource.MustParse("20m"), + corev1.ResourceMemory: resource.MustParse("40M"), + corev1.ResourceEphemeralStorage: resource.MustParse("100M"), + } + + defaultLimitsCouchdb = corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("200m"), + corev1.ResourceMemory: resource.MustParse("400M"), + corev1.ResourceEphemeralStorage: resource.MustParse("1G"), + } + + defaultRequestsDind = corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("10m"), + corev1.ResourceMemory: resource.MustParse("20M"), + corev1.ResourceEphemeralStorage: resource.MustParse("100M"), + } + + defaultLimitsDind = corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("200M"), + corev1.ResourceEphemeralStorage: resource.MustParse("1G"), + } + + defaultRequestsProxy = corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("10m"), + corev1.ResourceMemory: resource.MustParse("20M"), + corev1.ResourceEphemeralStorage: resource.MustParse("100M"), + } + + defaultLimitsProxy = corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("200M"), + corev1.ResourceEphemeralStorage: resource.MustParse("1G"), + } + + testMSPSpec = ¤t.MSPSpec{ + Component: ¤t.MSP{ + KeyStore: certKey, + SignCerts: signCert, + CACerts: []string{caCert}, + AdminCerts: []string{adminCert}, + }, + TLS: ¤t.MSP{ + KeyStore: certKey, + SignCerts: signCert, + CACerts: []string{caCert}, + }, + } +) + +var ( + peer *Peer + peer2 *Peer + peer3 *Peer +) + +var _ = Describe("Interaction between IBP-Operator and Kubernetes cluster", func() { + SetDefaultEventuallyTimeout(420 * time.Second) + SetDefaultEventuallyPollingInterval(time.Second) + + BeforeEach(func() { + peer = GetPeer1() + CreatePeer(peer) + + peer2 = GetPeer2() + CreatePeer(peer2) + + peer3 = GetPeer3() + CreatePeer(peer3) + + integration.ClearOperatorConfig(kclient, namespace) + }) + + AfterEach(func() { + // Set flag if a test falls + if CurrentGinkgoTestDescription().Failed { + testFailed = true + } + }) + + Context("IBPPeer controller", func() { + When("applying first instance of IBPPeer CR", func() { + var ( + err error + dep *appsv1.Deployment + ) + + It("creates a IBPPeer custom resource", func() { + By("setting the CR status to deploying", func() { + Eventually(peer.pollForCRStatus).Should((Equal(current.Deploying))) + }) + + By("creating pvcs", func() { + Eventually(peer.PVCExists).Should((Equal(true))) + Expect(peer.getPVCStorageFromSpec(fmt.Sprintf("%s-pvc", peer.Name))).To(Equal("150Mi")) + Expect(peer.getPVCStorageFromSpec(fmt.Sprintf("%s-statedb-pvc", peer.Name))).To(Equal("1Gi")) + }) + + By("creating a service", func() { + Eventually(peer.ServiceExists).Should((Equal(true))) + }) + + By("creating a configmap", func() { + Eventually(peer.ConfigMapExists).Should((Equal(true))) + }) + + By("starting a ingress", func() { + Eventually(peer.IngressExists).Should((Equal(true))) + }) + + By("creating a deployment", func() { + Eventually(peer.DeploymentExists).Should((Equal(true))) + }) + + By("starting a pod", func() { + Eventually(peer.PodIsRunning).Should((Equal(true))) + }) + + By("creating config map that contains spec", func() { + Eventually(func() bool { + _, err := kclient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), peer.Name+"-spec", metav1.GetOptions{}) + if err != nil { + return false + } + return true + }).Should(Equal(true)) + }) + + By("setting the CR status to deployed when pod is running", func() { + 
Eventually(peer.pollForCRStatus).Should((Equal(current.Deployed))) + }) + + cm, err := kclient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), peer.Name+"-config", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + coreBytes := cm.BinaryData["core.yaml"] + core, err := config.ReadCoreFromBytes(coreBytes) + Expect(err).NotTo(HaveOccurred()) + + By("overriding peer section in core.yaml", func() { + configOverride, err := peer.CR.GetConfigOverride() + Expect(err).NotTo(HaveOccurred()) + bytes, err := configOverride.(CoreConfig).ToBytes() + Expect(err).NotTo(HaveOccurred()) + coreConfig := &config.Core{} + err = yaml.Unmarshal(bytes, coreConfig) + Expect(err).NotTo(HaveOccurred()) + Expect(core.Peer.ID).To(Equal(coreConfig.Peer.ID)) + Expect(string(coreBytes)).To(ContainSubstring("chaincode")) + Expect(string(coreBytes)).To(ContainSubstring("vm")) + Expect(string(coreBytes)).To(ContainSubstring("ledger")) + Expect(string(coreBytes)).To(ContainSubstring("operations")) + Expect(string(coreBytes)).To(ContainSubstring("metrics")) + }) + + By("overriding chaincode section in core.yaml", func() { + configOverride, err := peer.CR.GetConfigOverride() + Expect(err).NotTo(HaveOccurred()) + bytes, err := configOverride.(CoreConfig).ToBytes() + Expect(err).NotTo(HaveOccurred()) + coreConfig := &config.Core{} + err = yaml.Unmarshal(bytes, coreConfig) + Expect(err).NotTo(HaveOccurred()) + Expect(core.Chaincode.StartupTimeout).To(Equal(coreConfig.Chaincode.StartupTimeout)) + Expect(core.Chaincode.ExecuteTimeout).To(Equal(coreConfig.Chaincode.ExecuteTimeout)) + Expect(core.Chaincode.InstallTimeout).To(Equal(coreConfig.Chaincode.InstallTimeout)) + }) + + By("creating secrets contain DeliveryClient.AddressOverrides ca certs", func() { + Expect(core.Peer.DeliveryClient.AddressOverrides[0].CACertsFile).To(Equal("/orderer/certs/cert0.pem")) + Expect(core.Peer.DeliveryClient.AddressOverrides[1].CACertsFile).To(Equal("/orderer/certs/cert1.pem")) + + s, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), peer.Name+"-orderercacerts", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + data := s.Data + Expect(len(data)).To(Equal(2)) + + caCertBytes, err := base64.StdEncoding.DecodeString(caCert) + Expect(err).NotTo(HaveOccurred()) + + signCertBytes, err := base64.StdEncoding.DecodeString(signCert) + Expect(err).NotTo(HaveOccurred()) + + Expect(data["cert0.pem"]).To(Equal(caCertBytes)) + Expect(data["cert1.pem"]).To(Equal(signCertBytes)) + }) + + By("overriding operations section in core.yaml", func() { + configOverride, err := peer.CR.GetConfigOverride() + Expect(err).NotTo(HaveOccurred()) + bytes, err := configOverride.(CoreConfig).ToBytes() + Expect(err).NotTo(HaveOccurred()) + coreConfig := &config.Core{} + err = yaml.Unmarshal(bytes, coreConfig) + Expect(err).NotTo(HaveOccurred()) + Expect(core.Operations.ListenAddress).To(Equal(coreConfig.Operations.ListenAddress)) + Expect(core.Operations.TLS.Certificate).To(Equal(coreConfig.Operations.TLS.Certificate)) + }) + + By("overriding metrics section in core.yaml", func() { + configOverride, err := peer.CR.GetConfigOverride() + Expect(err).NotTo(HaveOccurred()) + bytes, err := configOverride.(CoreConfig).ToBytes() + Expect(err).NotTo(HaveOccurred()) + coreConfig := &config.Core{} + err = yaml.Unmarshal(bytes, coreConfig) + Expect(err).NotTo(HaveOccurred()) + Expect(core.Metrics.Statsd.Address).To(Equal(coreConfig.Metrics.Statsd.Address)) + }) + }) + + // TODO: Test marked as pending until portworx issue is resolved, currently zone is + // 
required to be passed for provisioning to work. Once portworx is working again, this + // test should be reenabled + PIt("should not find zone and region", func() { + // Wait for new deployment before querying deployment for updates + wait.Poll(500*time.Millisecond, 60*time.Second, func() (bool, error) { + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), peer.Name, metav1.GetOptions{}) + if dep != nil { + if dep.Status.UpdatedReplicas >= 1 && dep.Status.Conditions[0].Type == appsv1.DeploymentAvailable { + return true, nil + } + } + return false, nil + }) + + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), peer.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("checking zone", func() { + Expect(peer.TestAffinityZone(dep)).Should((Equal(false))) + }) + + By("checking region", func() { + Expect(peer.TestAffinityRegion(dep)).Should((Equal(false))) + }) + }) + + When("the custom resource is updated", func() { + var ( + dep *appsv1.Deployment + newResourceRequestsPeer corev1.ResourceList + newResourceLimitsPeer corev1.ResourceList + newResourceRequestsProxy corev1.ResourceList + newResourceLimitsProxy corev1.ResourceList + newResourceRequestsCouchdb corev1.ResourceList + newResourceLimitsCouchdb corev1.ResourceList + ) + + BeforeEach(func() { + newResourceRequestsPeer = map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("90m"), + corev1.ResourceMemory: resource.MustParse("180M"), + corev1.ResourceEphemeralStorage: resource.MustParse("100M"), + } + newResourceLimitsPeer = map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("90m"), + corev1.ResourceMemory: resource.MustParse("180M"), + corev1.ResourceEphemeralStorage: resource.MustParse("1G"), + } + + newResourceRequestsProxy = map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("91m"), + corev1.ResourceMemory: resource.MustParse("181M"), + corev1.ResourceEphemeralStorage: resource.MustParse("100M"), + } + newResourceLimitsProxy = map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("91m"), + corev1.ResourceMemory: resource.MustParse("181M"), + corev1.ResourceEphemeralStorage: resource.MustParse("1G"), + } + + newResourceRequestsCouchdb = map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("193m"), + corev1.ResourceMemory: resource.MustParse("383M"), + corev1.ResourceEphemeralStorage: resource.MustParse("100M"), + } + newResourceLimitsCouchdb = map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("193m"), + corev1.ResourceMemory: resource.MustParse("383M"), + corev1.ResourceEphemeralStorage: resource.MustParse("1G"), + } + + peer.CR.Spec.Resources = ¤t.PeerResources{ + Peer: &corev1.ResourceRequirements{ + Requests: newResourceRequestsPeer, + Limits: newResourceLimitsPeer, + }, + GRPCProxy: &corev1.ResourceRequirements{ + Requests: newResourceRequestsProxy, + Limits: newResourceLimitsProxy, + }, + CouchDB: &corev1.ResourceRequirements{ + Requests: newResourceRequestsCouchdb, + Limits: newResourceLimitsCouchdb, + }, + } + + startupTimeout, err := common.ParseDuration("200s") + Expect(err).NotTo(HaveOccurred()) + + configOverride := config.Core{ + Core: v2.Core{ + Peer: v2.Peer{ + ID: "new-peerid", + }, + Chaincode: v2.Chaincode{ + StartupTimeout: startupTimeout, + }, + }, + } + + configBytes, err := json.Marshal(configOverride) + Expect(err).NotTo(HaveOccurred()) + + peer.CR.Spec.ConfigOverride = 
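+ // The override is carried on the CR as a raw JSON extension; the operator is
+ // expected to fold these values into the generated core.yaml config map,
+ // which the assertions below verify.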
&runtime.RawExtension{Raw: configBytes} + + Eventually(peer.DeploymentExists).Should((Equal(true))) + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), peer.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + }) + + It("updates the instance of IBPPeer if resources and config overrides are updated in CR", func() { + peerResources := dep.Spec.Template.Spec.Containers[0].Resources + Expect(peerResources.Requests).To(Equal(defaultRequestsPeer)) + Expect(peerResources.Limits).To(Equal(defaultLimitsPeer)) + + proxyResources := dep.Spec.Template.Spec.Containers[1].Resources + Expect(proxyResources.Requests).To(Equal(defaultRequestsProxy)) + Expect(proxyResources.Limits).To(Equal(defaultLimitsProxy)) + + couchDBResources := dep.Spec.Template.Spec.Containers[2].Resources + Expect(couchDBResources.Requests).To(Equal(defaultRequestsCouchdb)) + Expect(couchDBResources.Limits).To(Equal(defaultLimitsCouchdb)) + + bytes, err := json.Marshal(peer.CR) + Expect(err).NotTo(HaveOccurred()) + + result := ibpCRClient.Patch(types.MergePatchType).Namespace(namespace).Resource("ibppeers").Name(peer.Name).Body(bytes).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + + // Wait for new deployment before querying deployment for updates + wait.Poll(500*time.Millisecond, 60*time.Second, func() (bool, error) { + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), peer.Name, metav1.GetOptions{}) + if dep != nil { + if dep.Status.UpdatedReplicas >= 1 && dep.Status.Conditions[0].Type == appsv1.DeploymentAvailable { + if dep.Spec.Template.Spec.Containers[0].Resources.Requests.Cpu().MilliValue() == newResourceRequestsProxy.Cpu().MilliValue() { + return true, nil + } + } + } + return false, nil + }) + + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), peer.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + updatedPeerResources := dep.Spec.Template.Spec.Containers[0].Resources + Expect(updatedPeerResources.Requests).To(Equal(newResourceRequestsPeer)) + Expect(updatedPeerResources.Limits).To(Equal(newResourceLimitsPeer)) + + updatedProxyResources := dep.Spec.Template.Spec.Containers[1].Resources + Expect(updatedProxyResources.Requests).To(Equal(newResourceRequestsProxy)) + Expect(updatedProxyResources.Limits).To(Equal(newResourceLimitsProxy)) + + updatedCouchDBResources := dep.Spec.Template.Spec.Containers[2].Resources + Expect(updatedCouchDBResources.Requests).To(Equal(newResourceRequestsCouchdb)) + Expect(updatedCouchDBResources.Limits).To(Equal(newResourceLimitsCouchdb)) + + By("updating the config map with new values from override", func() { + core := &config.Core{} + + Eventually(func() string { + cm, err := kclient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), peer.Name+"-config", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + coreBytes := cm.BinaryData["core.yaml"] + core, err = config.ReadCoreFromBytes(coreBytes) + Expect(err).NotTo(HaveOccurred()) + + return core.Peer.ID + }).Should(Equal("new-peerid")) + + configOverride, err := peer.CR.GetConfigOverride() + Expect(err).NotTo(HaveOccurred()) + + bytes, err := configOverride.(CoreConfig).ToBytes() + Expect(err).NotTo(HaveOccurred()) + + coreConfig := &config.Core{} + err = yaml.Unmarshal(bytes, coreConfig) + Expect(err).NotTo(HaveOccurred()) + Expect(core.Chaincode.StartupTimeout).To(Equal(coreConfig.Chaincode.StartupTimeout)) + }) + }) + }) + + When("a deployment managed by operator is manually edited", func() { + var ( + err error + dep 
*appsv1.Deployment + ) + + BeforeEach(func() { + Eventually(func() bool { + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), peer.Name, metav1.GetOptions{}) + if err == nil && dep != nil { + return true + } + return false + }).Should(Equal(true)) + }) + + It("restores states", func() { + origRequests := dep.Spec.Template.Spec.Containers[0].Resources.Requests + + dep.Spec.Template.Spec.Containers[0].Resources.Requests = corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("107m"), + corev1.ResourceMemory: resource.MustParse("207M"), + } + + depBytes, err := json.Marshal(dep) + Expect(err).NotTo(HaveOccurred()) + + kclient.AppsV1().Deployments(namespace).Patch(context.TODO(), peer.Name, types.MergePatchType, depBytes, metav1.PatchOptions{}) + // Wait for new deployment before querying deployment for updates + wait.Poll(500*time.Millisecond, 300*time.Second, func() (bool, error) { + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), peer.Name, metav1.GetOptions{}) + if dep != nil { + if len(dep.Spec.Template.Spec.Containers) >= 1 { + if dep.Spec.Template.Spec.Containers[0].Resources.Requests.Cpu().MilliValue() == origRequests.Cpu().MilliValue() { + return true, nil + } + } + } + return false, nil + }) + + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), peer.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + Expect(dep.Spec.Template.Spec.Containers[0].Resources.Requests).To(Equal(origRequests)) + }) + }) + + When("admin certs are updated in peer spec", func() { + It("updates the admin cert secret", func() { + sec, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), "ecert-ibppeer1-admincerts", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + certBytes := sec.Data["admincert-0.pem"] + certBase64 := base64.StdEncoding.EncodeToString(certBytes) + Expect(certBase64).To(Equal(adminCert)) + + peer.CR.Spec.Secret.MSP.Component.AdminCerts = []string{signCert} + bytes, err := json.Marshal(peer.CR) + Expect(err).NotTo(HaveOccurred()) + + result := ibpCRClient.Patch(types.MergePatchType).Namespace(namespace).Resource("ibppeers").Name(peer.Name).Body(bytes).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + + Eventually(peer.checkAdminCertUpdate).Should(Equal(signCert)) + }) + }) + }) + + When("applying the second instance of IBPPeer CR", func() { + var ( + err error + dep *appsv1.Deployment + ) + + It("creates a second IBPPeer custom resource", func() { + By("starting a pod", func() { + Eventually(peer2.PodIsRunning).Should((Equal(true))) + }) + }) + + // TODO: Test marked as pending until portworx issue is resolved, currently zone is + // required to be passed for provisioning to work. 
Once portworx is working again, this + // test should be reenabled + PIt("should find zone and region", func() { + // Wait for new deployment before querying deployment for updates + wait.Poll(500*time.Millisecond, 60*time.Second, func() (bool, error) { + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), peer2.Name, metav1.GetOptions{}) + if dep != nil { + if dep.Status.UpdatedReplicas >= 1 && dep.Status.Conditions[0].Type == appsv1.DeploymentAvailable { + return true, nil + } + } + return false, nil + }) + + dep, err = kclient.AppsV1().Deployments(namespace).Get(context.TODO(), peer2.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("checking zone", func() { + Expect(peer2.TestAffinityZone(dep)).To((Equal(true))) + }) + + By("checking region", func() { + Expect(peer2.TestAffinityRegion(dep)).To((Equal(true))) + }) + }) + }) + + Context("operator pod restart", func() { + var ( + oldPodName string + ) + + Context("should not trigger deployment restart if config overrides not updated", func() { + BeforeEach(func() { + Eventually(peer.PodIsRunning).Should((Equal(true))) + + Eventually(func() int { return len(peer.GetRunningPods()) }).Should(Equal(1)) + oldPodName = peer.GetRunningPods()[0].Name + }) + + It("does not restart the peer pod", func() { + Eventually(peer.PodIsRunning).Should((Equal(true))) + + Eventually(func() bool { + pods := peer.GetRunningPods() + if len(pods) != 1 { + return false + } + + newPodName := pods[0].Name + if newPodName == oldPodName { + return true + } + + return false + }).Should(Equal(true)) + }) + }) + + PContext("should trigger deployment restart if config overrides are updated", func() { + BeforeEach(func() { + Eventually(peer.PodIsRunning).Should((Equal(true))) + Eventually(func() int { + return len(peer.GetPods()) + }).Should(Equal(1)) + + configOverride := config.Core{ + Core: v2.Core{ + Peer: v2.Peer{ + ID: "new-id", + }, + }, + } + + configBytes, err := json.Marshal(configOverride) + Expect(err).NotTo(HaveOccurred()) + + peer.CR.Spec.ConfigOverride = &runtime.RawExtension{Raw: configBytes} + + bytes, err := json.Marshal(peer.CR) + Expect(err).NotTo(HaveOccurred()) + + result := ibpCRClient.Patch(types.MergePatchType).Namespace(namespace).Resource("ibppeers").Name(peer.Name).Body(bytes).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + }) + + It("restarts the peer pod", func() { + Eventually(peer.PodIsRunning).Should((Equal(false))) + Eventually(peer.PodIsRunning).Should((Equal(true))) + + Eventually(func() bool { + pods := peer.GetPods() + if len(pods) != 1 { + return false + } + + newPodName := pods[0].Name + if newPodName == oldPodName { + return false + } + + return true + }).Should(Equal(true)) + }) + }) + }) + + When("applying incorrectly configured third instance of IBPPeer CR", func() { + It("should set the CR status to error", func() { + Eventually(peer3.pollForCRStatus).Should((Equal(current.Error))) + + crStatus := ¤t.IBPPeer{} + result := ibpCRClient.Get().Namespace(namespace).Resource("ibppeers").Name(peer3.Name).Do(context.TODO()) + result.Into(crStatus) + + Expect(crStatus.Status.Message).To(ContainSubstring("user must accept license before continuing")) + }) + }) + + Context("delete crs", func() { + It("should delete IBPPeer CR", func() { + By("deleting the first instance of IBPPeer CR", func() { + result := ibpCRClient.Delete().Namespace(namespace).Resource("ibppeers").Name(peer.Name).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + }) + + By("deleting the second 
instance of IBPPeer CR", func() { + result := ibpCRClient.Delete().Namespace(namespace).Resource("ibppeers").Name(peer2.Name).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + }) + + By("deleting the third instance of IBPPeer CR", func() { + result := ibpCRClient.Delete().Namespace(namespace).Resource("ibppeers").Name(peer3.Name).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + }) + }) + }) + }) +}) + +func GetPeer1() *Peer { + startupTimeout, err := common.ParseDuration("200s") + Expect(err).NotTo(HaveOccurred()) + executeTimeout, err := common.ParseDuration("20s") + Expect(err).NotTo(HaveOccurred()) + installTimeout, err := common.ParseDuration("600s") + Expect(err).NotTo(HaveOccurred()) + + configOverride := config.Core{ + Core: v2.Core{ + Peer: v2.Peer{ + ID: "testPeerID", + DeliveryClient: v1.DeliveryClient{ + AddressOverrides: []v1.AddressOverride{ + v1.AddressOverride{ + CACertsFile: caCert, + }, + v1.AddressOverride{ + CACertsFile: signCert, + }, + }, + }, + }, + Chaincode: v2.Chaincode{ + StartupTimeout: startupTimeout, + ExecuteTimeout: executeTimeout, + InstallTimeout: installTimeout, + }, + Metrics: v1.Metrics{ + Statsd: v1.Statsd{ + Address: "127.0.0.1:9445", + }, + }, + Operations: v1.Operations{ + ListenAddress: "127.0.0.1:9444", + TLS: v1.OperationsTLS{ + Certificate: v1.File{ + File: "ops-tls-cert.pem", + }, + }, + }, + }, + } + + configBytes, err := json.Marshal(configOverride) + Expect(err).NotTo(HaveOccurred()) + + name := "ibppeer1" + cr := ¤t.IBPPeer{ + TypeMeta: metav1.TypeMeta{ + Kind: "IBPPeer", + APIVersion: "ibp.com/v1beta1", + }, + Spec: current.IBPPeerSpec{ + License: current.License{ + Accept: true, + }, + MSPID: "test-peer-mspid", + ImagePullSecrets: []string{"regcred"}, + Region: "select", + Zone: "select", + Images: ¤t.PeerImages{ + CouchDBImage: integration.CouchdbImage, + CouchDBTag: integration.CouchdbTag, + GRPCWebImage: integration.GrpcwebImage, + GRPCWebTag: integration.GrpcwebTag, + PeerImage: integration.PeerImage, + PeerTag: integration.PeerTag, + PeerInitImage: integration.InitImage, + PeerInitTag: integration.InitTag, + }, + Domain: integration.TestAutomation1IngressDomain, + Resources: ¤t.PeerResources{ + DinD: &corev1.ResourceRequirements{ + + Requests: defaultRequestsDind, + Limits: defaultLimitsDind, + }, + Peer: &corev1.ResourceRequirements{ + Requests: defaultRequestsPeer, + Limits: defaultLimitsPeer, + }, + GRPCProxy: &corev1.ResourceRequirements{ + Requests: defaultRequestsProxy, + Limits: defaultLimitsProxy, + }, + FluentD: &corev1.ResourceRequirements{ + Requests: defaultRequestsFluentd, + Limits: defaultLimitsFluentd, + }, + CouchDB: &corev1.ResourceRequirements{ + Requests: defaultRequestsCouchdb, + Limits: defaultLimitsCouchdb, + }, + }, + Storage: ¤t.PeerStorages{ + Peer: ¤t.StorageSpec{ + Size: "150Mi", + }, + StateDB: ¤t.StorageSpec{ + Size: "1Gi", + }, + }, + Ingress: current.Ingress{ + TlsSecretName: "tlssecret", + }, + Secret: ¤t.SecretSpec{ + MSP: testMSPSpec, + }, + ConfigOverride: &runtime.RawExtension{Raw: configBytes}, + DisableNodeOU: ¤t.BoolTrue, + FabricVersion: integration.FabricVersion + "-1", + }, + } + cr.Name = name + + return &Peer{ + Name: name, + CR: cr, + NativeResourcePoller: integration.NativeResourcePoller{ + Name: name, + Namespace: namespace, + Client: kclient, + }, + } +} + +func GetPeer2() *Peer { + name := "ibppeer2" + cr := ¤t.IBPPeer{ + Spec: current.IBPPeerSpec{ + License: current.License{ + Accept: true, + }, + MSPID: "test-peer2-mspid", + StateDb: "leveldb", + 
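+ // peer2 uses LevelDB for its state database and, unlike peer1, does not set
+ // explicit Resources or ConfigOverride sections.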
Region: "select", + Zone: "select", + ImagePullSecrets: []string{"regcred"}, + Images: ¤t.PeerImages{ + CouchDBImage: integration.CouchdbImage, + CouchDBTag: integration.CouchdbTag, + GRPCWebImage: integration.GrpcwebImage, + GRPCWebTag: integration.GrpcwebTag, + PeerImage: integration.PeerImage, + PeerTag: integration.PeerTag, + PeerInitImage: integration.InitImage, + PeerInitTag: integration.InitTag, + }, + Domain: integration.TestAutomation1IngressDomain, + Storage: ¤t.PeerStorages{ + Peer: ¤t.StorageSpec{ + Size: "150Mi", + }, + StateDB: ¤t.StorageSpec{ + Size: "1Gi", + }, + }, + Ingress: current.Ingress{ + TlsSecretName: "tlssecret", + }, + Secret: ¤t.SecretSpec{ + MSP: testMSPSpec, + }, + DisableNodeOU: ¤t.BoolTrue, + FabricVersion: integration.FabricVersion + "-1", + }, + } + cr.Name = name + + return &Peer{ + Name: name, + CR: cr, + NativeResourcePoller: integration.NativeResourcePoller{ + Name: name, + Namespace: namespace, + Client: kclient, + }, + } +} + +func GetPeer3() *Peer { + name := "ibppeer3" + cr := ¤t.IBPPeer{ + Spec: current.IBPPeerSpec{ + Domain: integration.TestAutomation1IngressDomain, + FabricVersion: integration.FabricVersion + "-1", + }, + } + cr.Name = name + + return &Peer{ + Name: name, + CR: cr, + NativeResourcePoller: integration.NativeResourcePoller{ + Name: name, + Namespace: namespace, + Client: kclient, + }, + } +} diff --git a/integration/restartmgr/restartmgr_suite_test.go b/integration/restartmgr/restartmgr_suite_test.go new file mode 100644 index 00000000..40c8ef1a --- /dev/null +++ b/integration/restartmgr/restartmgr_suite_test.go @@ -0,0 +1,321 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package restartmgr_test + +import ( + "encoding/base64" + "fmt" + "io/ioutil" + "net/url" + "os" + "path/filepath" + "strings" + "testing" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/onsi/gomega/gexec" + + "github.com/IBM-Blockchain/fabric-operator/integration" + "github.com/IBM-Blockchain/fabric-operator/integration/helper" + ibpclient "github.com/IBM-Blockchain/fabric-operator/pkg/client" + baseorderer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + + "k8s.io/client-go/kubernetes" +) + +func TestRestart(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "RestartMgr Suite") +} + +const ( + ccTarFile = "gocc.tar.gz" + + FabricBinaryVersion = "2.2.3" + FabricCABinaryVersion = "1.5.1" + + peerAdminUsername = "peer-admin" + peerUsername = "peer" + ordererUsername = "orderer" + + IBPCAS = "ibpcas" + IBPPEERS = "ibppeers" + IBPORDERERS = "ibporderers" +) + +var ( + wd string // Working directory of test + namespace string + domain string + kclient *kubernetes.Clientset + ibpCRClient *ibpclient.IBPClient + colorIndex uint + testFailed bool + caHost string + tlsBytes []byte + + org1ca *helper.CA + org1peer *helper.Peer + orderer *helper.Orderer +) + +var _ = BeforeSuite(func() { + SetDefaultEventuallyTimeout(600 * time.Second) + SetDefaultEventuallyPollingInterval(time.Second) + + var err error + + domain = os.Getenv("DOMAIN") + if domain == "" { + domain = integration.TestAutomation1IngressDomain + } + + wd, err = os.Getwd() + Expect(err).NotTo(HaveOccurred()) + fmt.Fprintf(GinkgoWriter, "Working directory: %s\n", wd) + + cleanupFiles() + + cfg := &integration.Config{ + OperatorServiceAccount: "../../config/rbac/service_account.yaml", + OperatorRole: "../../config/rbac/role.yaml", + OperatorRoleBinding: "../../config/rbac/role_binding.yaml", + OperatorDeployment: "../../testdata/deploy/operator.yaml", + OrdererSecret: "../../testdata/deploy/orderer/secret.yaml", + PeerSecret: "../../testdata/deploy/peer/secret.yaml", + ConsoleTLSSecret: "../../testdata/deploy/console/tlssecret.yaml", + } + + namespace, kclient, ibpCRClient, err = integration.Setup(GinkgoWriter, cfg, "restartmgr", "") + Expect(err).NotTo(HaveOccurred()) + + downloadBinaries() + + CreateNetwork() +}) + +var _ = AfterSuite(func() { + if strings.ToLower(os.Getenv("SAVE_TEST")) == "true" { + return + } + + integration.Cleanup(GinkgoWriter, kclient, namespace) + + cleanupFiles() +}) + +func CreateNetwork() { + By("starting CA pod", func() { + org1ca = Org1CA() + helper.CreateCA(ibpCRClient, org1ca.CR) + + Eventually(org1ca.PodIsRunning).Should((Equal(true))) + }) + + profile, err := org1ca.ConnectionProfile() + Expect(err).NotTo(HaveOccurred()) + + tlsBytes, err = util.Base64ToBytes(profile.TLS.Cert) + Expect(err).NotTo(HaveOccurred()) + + By("performing CA health check", func() { + Eventually(func() bool { + url := fmt.Sprintf("https://%s/cainfo", org1ca.Address()) + fmt.Fprintf(GinkgoWriter, "Waiting for CA health check to pass for '%s' at url: %s\n", org1ca.Name, url) + return org1ca.HealthCheck(url, tlsBytes) + }).Should(Equal(true)) + }) + + org1ca.TLSToFile(tlsBytes) + + caURL, err := url.Parse(profile.Endpoints.API) + Expect(err).NotTo(HaveOccurred()) + caHost = strings.Split(caURL.Host, ":")[0] + + By("enrolling ca admin", func() { + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, org1ca.Name, "org1ca-admin")) + sess, err := helper.StartSession(org1ca.Enroll("admin", "adminpw"), "Enroll CA Admin") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + }) + + By("registering peer identity", func() { + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, 
org1ca.Name, "org1ca-admin")) + sess, err := helper.StartSession(org1ca.Register(peerUsername, "peerpw", "peer"), "Register User") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, org1ca.Name, "org1ca-admin")) + sess, err = helper.StartSession(org1ca.Register("peer2", "peerpw2", "peer"), "Register User") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + }) + + By("registering and enrolling peer admin", func() { + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, org1ca.Name, "org1ca-admin")) + sess, err := helper.StartSession(org1ca.Register(peerAdminUsername, "peer-adminpw", "admin"), "Register Peer Admin") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, "org1peer", peerAdminUsername)) + sess, err = helper.StartSession(org1ca.Enroll(peerAdminUsername, "peer-adminpw"), "Enroll Peer Admin") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, "org1peer", peerAdminUsername+"2")) + sess, err = helper.StartSession(org1ca.Enroll(peerAdminUsername, "peer-adminpw"), "Enroll Second Peer Admin") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + }) + + By("registering orderer identity", func() { + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, org1ca.Name, "org1ca-admin")) + sess, err := helper.StartSession(org1ca.Register(ordererUsername, "ordererpw", "orderer"), "Register Orderer Identity") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + + os.Setenv("FABRIC_CA_CLIENT_HOME", filepath.Join(wd, org1ca.Name, "org1ca-admin")) + sess, err = helper.StartSession(org1ca.Register("orderer2", "ordererpw2", "orderer"), "Register Orderer Identity") + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) + }) + + adminCertBytes, err := ioutil.ReadFile( + filepath.Join( + wd, + "org1peer", + peerAdminUsername, + "msp", + "signcerts", + "cert.pem", + ), + ) + Expect(err).NotTo(HaveOccurred()) + adminCertB64 := base64.StdEncoding.EncodeToString(adminCertBytes) + + By("starting Peer pod", func() { + org1peer = Org1Peer(profile.TLS.Cert, caHost, adminCertB64) + err = helper.CreatePeer(ibpCRClient, org1peer.CR) + Expect(err).NotTo(HaveOccurred()) + }) + + By("starting Orderer pod", func() { + orderer = GetOrderer(profile.TLS.Cert, caHost) + err = helper.CreateOrderer(ibpCRClient, orderer.CR) + Expect(err).NotTo(HaveOccurred()) + }) + + Eventually(org1peer.PodIsRunning).Should((Equal(true))) + Eventually(orderer.Nodes[0].PodIsRunning).Should((Equal(true))) +} + +func downloadBinaries() { + os.Setenv("FABRIC_VERSION", FabricBinaryVersion) + os.Setenv("FABRIC_CA_VERSION", FabricCABinaryVersion) + sess, err := helper.StartSession( + helper.GetCommand(helper.AbsPath(wd, "../../scripts/download_binaries.sh")), + "Download Binaries", + ) + Expect(err).NotTo(HaveOccurred()) + Eventually(sess).Should(gexec.Exit(0)) +} + +func cleanupFiles() { + os.RemoveAll(filepath.Join(wd, Org1CA().Name)) + os.RemoveAll(filepath.Join(wd, Org1Peer("", "", "").Name)) + os.RemoveAll(filepath.Join(wd, GetOrderer("", "").Nodes[0].Name)) + os.RemoveAll(filepath.Join(wd, ccTarFile)) +} + +func Org1CA() *helper.CA { + cr := helper.Org1CACR(namespace, domain) + + return &helper.CA{ + Domain: domain, + Name: cr.Name, + Namespace: namespace, + WorkingDir: wd, + CR: cr, + CRClient: 
ibpCRClient, + KClient: kclient, + NativeResourcePoller: integration.NativeResourcePoller{ + Name: cr.Name, + Namespace: namespace, + Client: kclient, + }, + } +} + +func Org1Peer(tlsCert, caHost, adminCert string) *helper.Peer { + cr, err := helper.Org1PeerCR(namespace, domain, peerUsername, tlsCert, caHost, adminCert) + Expect(err).NotTo(HaveOccurred()) + + return &helper.Peer{ + Domain: domain, + Name: cr.Name, + Namespace: namespace, + WorkingDir: wd, + CR: cr, + CRClient: ibpCRClient, + KClient: kclient, + NativeResourcePoller: integration.NativeResourcePoller{ + Name: cr.Name, + Namespace: namespace, + Client: kclient, + }, + } +} + +func GetOrderer(tlsCert, caHost string) *helper.Orderer { + cr, err := helper.OrdererCR(namespace, domain, ordererUsername, tlsCert, caHost) + Expect(err).NotTo(HaveOccurred()) + + nodes := []helper.Orderer{ + helper.Orderer{ + Name: cr.Name + "node1", + Namespace: namespace, + CR: cr.DeepCopy(), + NodeName: fmt.Sprintf("%s%s%d", cr.Name, baseorderer.NODE, 1), + NativeResourcePoller: integration.NativeResourcePoller{ + Name: cr.Name + "node1", + Namespace: namespace, + Client: kclient, + }, + }, + } + + nodes[0].CR.ObjectMeta.Name = cr.Name + "node1" + + return &helper.Orderer{ + Name: cr.Name, + Namespace: namespace, + CR: cr, + NodeName: fmt.Sprintf("%s-%s%d", cr.Name, baseorderer.NODE, 1), + NativeResourcePoller: integration.NativeResourcePoller{ + Name: cr.Name, + Namespace: namespace, + Client: kclient, + }, + Nodes: nodes, + CRClient: ibpCRClient, + } +} diff --git a/integration/restartmgr/restartmgr_test.go b/integration/restartmgr/restartmgr_test.go new file mode 100644 index 00000000..c530dd59 --- /dev/null +++ b/integration/restartmgr/restartmgr_test.go @@ -0,0 +1,579 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package restartmgr_test + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io/ioutil" + "path/filepath" + "time" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/integration/helper" + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/ca/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/restart" + "github.com/IBM-Blockchain/fabric-operator/pkg/restart/staggerrestarts" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" +) + +var _ = Describe("restart manager", func() { + AfterEach(func() { + // Set flag if a test falls + if CurrentGinkgoTestDescription().Failed { + testFailed = true + } + }) + + Context("peer", func() { + Context("admin certs", func() { + var ( + podName string + peer *current.IBPPeer + tlsbackup *common.Backup + ecertbackup *common.Backup + ) + + BeforeEach(func() { + Eventually(func() int { return len(org1peer.GetRunningPods()) }).Should(Equal(1)) + + podName = org1peer.GetRunningPods()[0].Name + + // Get peer's custom resource (CR) + result := ibpCRClient.Get().Namespace(namespace).Resource(IBPPEERS).Name(org1peer.Name).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + + peer = ¤t.IBPPeer{} + result.Into(peer) + + tlsbackup = GetBackup("tls", org1peer.Name) + ecertbackup = GetBackup("ecert", org1peer.Name) + }) + + It("restarts the peer after admin cert update", func() { + // Update the admin cert in the peer's CR spec + adminCertBytes, err := ioutil.ReadFile(filepath.Join(wd, "org1peer", peerAdminUsername+"2", "msp", "signcerts", "cert.pem")) + Expect(err).NotTo(HaveOccurred()) + adminCertB64 := base64.StdEncoding.EncodeToString(adminCertBytes) + peer.Spec.Secret.Enrollment.Component.AdminCerts = []string{peer.Spec.Secret.Enrollment.Component.AdminCerts[0], adminCertB64} + + bytes, err := json.Marshal(peer) + Expect(err).NotTo(HaveOccurred()) + + // Update the peer's CR spec + result := ibpCRClient.Put().Namespace(namespace).Resource(IBPPEERS).Name(org1peer.Name).Body(bytes).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + + Eventually(org1peer.PodIsRunning).Should((Equal(true))) + + By("restarting peer pods", func() { + Eventually(func() bool { + pods := org1peer.GetRunningPods() + if len(pods) != 1 { + return false + } + + newPodName := pods[0].Name + if newPodName == podName { + return false + } + + return true + }).Should(Equal(true)) + }) + + By("not performing backup of crypto beforehand", func() { + newTLSBackup := GetBackup("tls", org1peer.Name) + newEcertBackup := GetBackup("ecert", org1peer.Name) + Expect(newTLSBackup).To(Equal(tlsbackup)) + Expect(newEcertBackup).To(Equal(ecertbackup)) + }) + + By("removing instance from restart queue", func() { + Eventually(func() bool { + restartConfig := GetRestartConfigFor("peer") + if len(restartConfig.Queues[org1peer.CR.GetMSPID()]) != 0 { + return false + } + if restartConfig.Log[org1peer.Name] == nil { + return false + } + if len(restartConfig.Log[org1peer.Name]) != 1 { + return false + } + if restartConfig.Log[org1peer.Name][0].CRName != org1peer.Name { + return false + } + + return true + }).Should(Equal(true)) + }) + }) + + It("does not restart the peer if spec is updated with empty list of admin certs", func() { + // Update the admin cert in the peer's CR spec to be empty + peer.Spec.Secret.Enrollment.Component.AdminCerts = []string{} + bytes, err := json.Marshal(peer) + Expect(err).NotTo(HaveOccurred()) + + // Update the peer's CR spec + result := ibpCRClient.Put().Namespace(namespace).Resource(IBPPEERS).Name(org1peer.Name).Body(bytes).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + + Eventually(org1peer.PodIsRunning).Should((Equal(true))) + + Eventually(func() bool { + pods := org1peer.GetRunningPods() + if len(pods) != 1 { + return false + } + + 
newPodName := pods[0].Name + if newPodName == podName { + return true + } + + return false + }).Should(Equal(true)) + + }) + }) + + Context("request deployment restart", func() { + var ( + podName string + peer *current.IBPPeer + restartTime string + ) + + BeforeEach(func() { + Eventually(func() int { return len(org1peer.GetPods()) }).Should(Equal(1)) + + podName = org1peer.GetRunningPods()[0].Name + + // Get peer's custom resource (CR) + result := ibpCRClient.Get().Namespace(namespace).Resource(IBPPEERS).Name(org1peer.Name).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + + peer = ¤t.IBPPeer{} + result.Into(peer) + + }) + + When("peer was restarted less than 10 min ago for admin cert updates", func() { + BeforeEach(func() { + // Create operator-config map to indicate that peer was restarted recently for admin cert update + restartTime = time.Now().UTC().Format(time.RFC3339) + CreateOrUpdateOperatorConfig(peer.Name, restart.ADMINCERT, restartTime) + + Eventually(func() bool { + _, err := kclient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), "operator-config", metav1.GetOptions{}) + if err != nil { + return false + } + return true + }).Should(Equal(true)) + }) + + It("does not restart the peer when admin certs are updated", func() { + By("updating peer's admin certs", func() { + adminCertBytes, err := ioutil.ReadFile(filepath.Join(wd, "org1peer", peerAdminUsername+"2", "msp", "signcerts", "cert.pem")) + Expect(err).NotTo(HaveOccurred()) + adminCertB64 := base64.StdEncoding.EncodeToString(adminCertBytes) + peer.Spec.Secret.Enrollment.Component.AdminCerts = []string{adminCertB64} + + bytes, err := json.Marshal(peer) + Expect(err).NotTo(HaveOccurred()) + + // Update the peer's CR spec + result := ibpCRClient.Put().Namespace(namespace).Resource(IBPPEERS).Name(org1peer.Name).Body(bytes).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + + Eventually(org1peer.PodIsRunning).Should((Equal(true))) + }) + + By("not restarting peer pods again", func() { + Consistently(func() bool { + pods := org1peer.GetRunningPods() + if len(pods) != 1 { + return false + } + + newPodName := pods[0].Name + if newPodName == podName { + return true + } + + return false + }, 5*time.Second).Should(Equal(true)) + }) + + // TODO: This test is failing, there seems to be a couple seconds difference between actual and expected time values. Needs investigation. + By("adding a pending restart request to config map", func() { + Skip("Skipping test, needs revision as it currently fails") + cm, err := kclient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), "operator-config", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + cfg := &restart.Config{} + err = json.Unmarshal(cm.BinaryData["restart-config.yaml"], cfg) + Expect(err).NotTo(HaveOccurred()) + + Expect(cfg.Instances[peer.Name].Requests[restart.ADMINCERT].LastActionTimestamp).To(Equal(restartTime)) + Expect(cfg.Instances[peer.Name].Requests[restart.ADMINCERT].Status).To(Equal(restart.Pending)) + }) + }) + }) + }) + }) + + Context("orderer - request deployment restart", func() { + var ( + node1 helper.Orderer + + podName string + ibporderer *current.IBPOrderer + restartTime string + ) + + BeforeEach(func() { + ClearOperatorConfig() + + node1 = orderer.Nodes[0] + Eventually(node1.PodIsRunning, time.Second*60, time.Second*2).Should((Equal(true))) + Eventually(func() int { return len(node1.GetPods()) }).Should(Equal(1)) + + podName = node1.GetPods()[0].Name + result := ibpCRClient.Get().Namespace(namespace). 
+ Resource(IBPORDERERS). + Name(node1.Name). + Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + + ibporderer = ¤t.IBPOrderer{} + result.Into(ibporderer) + }) + + When("reenroll is triggered", func() { + It("restarts", func() { + ibporderer.Spec.Action.Reenroll.Ecert = true + ordererbytes, err := json.Marshal(ibporderer) + Expect(err).NotTo(HaveOccurred()) + + result := ibpCRClient.Patch(types.MergePatchType).Namespace(namespace).Resource(IBPORDERERS).Name(node1.Name).Body(ordererbytes).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + + Eventually(func() bool { + restartConfig := GetRestartConfigFor("orderer") + if restartConfig == nil { + return false + } + if len(restartConfig.Queues["orderermsp"]) != 0 { + return false + } + if restartConfig.Log["ibporderer1node1"] == nil || len(restartConfig.Log["ibporderer1node1"]) != 1 { + return false + } + return true + }).Should(Equal(true)) + + }) + }) + + When("orderer was restarted less than 10 min ago for ecert reenroll", func() { + BeforeEach(func() { + // Create operator-config map to indicate that peer was restarted recently for ecert reenroll + restartTime = time.Now().UTC().Format(time.RFC3339) + CreateOrUpdateOperatorConfig(ibporderer.Name, restart.ECERTUPDATE, restartTime) + + Eventually(func() bool { + _, err := kclient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), "operator-config", metav1.GetOptions{}) + if err != nil { + return false + } + return true + }).Should(Equal(true)) + }) + + It("does not restart orderer when ecerts reenroll occurs", func() { + By("triggering ecert reenroll", func() { + ibporderer.Spec.Action.Reenroll.Ecert = true + ordererbytes, err := json.Marshal(ibporderer) + Expect(err).NotTo(HaveOccurred()) + + result := ibpCRClient.Put().Namespace(namespace).Resource(IBPORDERERS).Name(node1.Name).Body(ordererbytes).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + + Eventually(node1.PodIsRunning).Should(Equal(true)) + }) + + By("not restarting orderer pods again", func() { + Eventually(func() bool { + pods := node1.GetRunningPods() + if len(pods) != 1 { + return false + } + + newPodName := pods[0].Name + if newPodName == podName { + return true + } + + return false + }).Should(Equal(true)) + }) + + By("adding a pending restart request to config map", func() { + Skip("Skipping test, needs revision as it currently fails") + + Eventually(func() bool { + cm, err := kclient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), "operator-config", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + cfg := &restart.Config{} + err = json.Unmarshal(cm.BinaryData["restart-config.yaml"], cfg) + Expect(err).NotTo(HaveOccurred()) + + status := cfg.Instances[ibporderer.Name].Requests[restart.ECERTUPDATE].Status + lastTimestamp := cfg.Instances[ibporderer.Name].Requests[restart.ECERTUPDATE].LastActionTimestamp + if status == restart.Pending && lastTimestamp == restartTime { + return true + } + + return false + }).Should(Equal(true)) + }) + }) + }) + }) + + Context("CA - request deployment restart", func() { + var ( + podName string + ca *current.IBPCA + restartTime string + ) + + BeforeEach(func() { + Eventually(func() int { + return len(org1ca.GetPods()) + }).Should(Equal(1)) + + podName = org1ca.GetPods()[0].Name + + result := ibpCRClient.Get().Namespace(namespace).Resource(IBPCAS).Name(org1ca.Name).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + + ca = ¤t.IBPCA{} + result.Into(ca) + }) + + Context("staggering ca restarts", func() { + var ( + 
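+ // CA restarts go through the staggered restart manager, which queues
+ // restarts per organization in the ca-restart-config config map (read back
+ // via GetRestartConfigFor below).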
bytes []byte + err error + ) + + BeforeEach(func() { + ca.Spec.Action.Renew.TLSCert = true + + bytes, err = json.Marshal(ca) + Expect(err).NotTo(HaveOccurred()) + }) + + It("restarts nodes one at a time in same org", func() { + result := ibpCRClient.Patch(types.MergePatchType).Namespace(namespace).Resource(IBPCAS).Name(org1ca.Name).Body(bytes).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + + Eventually(func() bool { + restartConfig := GetRestartConfigFor("ca") + if restartConfig == nil { + return false + } + if len(restartConfig.Queues[""]) != 0 { + return false + } + if restartConfig.Log["org1ca"] == nil || len(restartConfig.Log["org1ca"]) != 1 { + return false + } + + return true + }).Should(Equal(true)) + }) + }) + + When("ca was restarted less than 10 min ago for config override", func() { + BeforeEach(func() { + // Create operator-config map to indicate that peer was restarted recently for ecert reenroll + restartTime = time.Now().UTC().Format(time.RFC3339) + CreateOrUpdateOperatorConfig(ca.Name, restart.CONFIGOVERRIDE, restartTime) + + Eventually(func() bool { + _, err := kclient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), "operator-config", metav1.GetOptions{}) + if err != nil { + return false + } + return true + }).Should(Equal(true)) + }) + + It("does not restart ca when config override occurs", func() { + override := &v1.ServerConfig{ + CAConfig: v1.CAConfig{ + Version: "1.4.8", + }, + } + overrideBytes, err := json.Marshal(override) + Expect(err).NotTo(HaveOccurred()) + ca.Spec.ConfigOverride = ¤t.ConfigOverride{ + CA: &runtime.RawExtension{Raw: overrideBytes}, + } + + bytes, err := json.Marshal(ca) + Expect(err).NotTo(HaveOccurred()) + + result := ibpCRClient.Patch(types.MergePatchType).Namespace(namespace).Resource(IBPCAS).Name(org1ca.Name).Body(bytes).Do(context.TODO()) + Expect(result.Error()).NotTo(HaveOccurred()) + + Eventually(org1ca.PodIsRunning).Should((Equal(true))) + + By("not restarting ca pod", func() { + Eventually(func() bool { + pods := org1ca.GetPods() + if len(pods) != 1 { + return false + } + + newPodName := pods[0].Name + if newPodName == podName { + return true + } + + return false + }).Should(Equal(true)) + }) + + By("adding a pending restart request to config map", func() { + Eventually(func() bool { + cm, err := kclient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), "operator-config", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + cfg := &restart.Config{} + err = json.Unmarshal(cm.BinaryData["restart-config.yaml"], cfg) + Expect(err).NotTo(HaveOccurred()) + + status := cfg.Instances[ca.Name].Requests[restart.CONFIGOVERRIDE].Status + lastTimestamp := cfg.Instances[ca.Name].Requests[restart.CONFIGOVERRIDE].LastActionTimestamp + if status == restart.Pending && lastTimestamp == restartTime { + return true + } + + return false + }).Should(Equal(true)) + }) + }) + }) + }) +}) + +func CreateOrUpdateOperatorConfig(instance string, reason restart.Reason, lastRestart string) { + oldCM := GetOperatorConfigMap(instance, reason, lastRestart) + + cm, err := kclient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), "operator-config", metav1.GetOptions{}) + if k8serrors.IsNotFound(err) { + _, err = kclient.CoreV1().ConfigMaps(namespace).Create(context.TODO(), oldCM, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + } else { + + cm.BinaryData = oldCM.BinaryData + _, err = kclient.CoreV1().ConfigMaps(namespace).Update(context.TODO(), cm, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred()) + } +} + +func 
GetOperatorConfigMap(instance string, reason restart.Reason, lastRestart string) *corev1.ConfigMap { + cfg := &restart.Config{ + Instances: map[string]*restart.Restart{ + instance: { + Requests: map[restart.Reason]*restart.Request{ + reason: { + LastActionTimestamp: lastRestart, + }, + }, + }, + }, + } + bytes, err := json.Marshal(cfg) + Expect(err).NotTo(HaveOccurred()) + + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "operator-config", + Namespace: namespace, + }, + BinaryData: map[string][]byte{ + "restart-config.yaml": bytes, + }, + } +} + +func ClearOperatorConfig() { + err := kclient.CoreV1().ConfigMaps(namespace).Delete(context.TODO(), "operator-config", *metav1.NewDeleteOptions(0)) + if !k8serrors.IsNotFound(err) { + Expect(err).NotTo(HaveOccurred()) + } +} + +func GetBackup(certType, name string) *common.Backup { + backupSecret, err := kclient.CoreV1().Secrets(namespace).Get(context.TODO(), fmt.Sprintf("%s-crypto-backup", name), metav1.GetOptions{}) + if err != nil { + Expect(k8serrors.IsNotFound(err)).To(Equal(true)) + return &common.Backup{} + } + + backup := &common.Backup{} + key := fmt.Sprintf("%s-backup.json", certType) + err = json.Unmarshal(backupSecret.Data[key], backup) + Expect(err).NotTo(HaveOccurred()) + + return backup +} + +func GetRestartConfigFor(componentType string) *staggerrestarts.RestartConfig { + cmName := componentType + "-restart-config" + cm, err := kclient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), cmName, metav1.GetOptions{}) + if err != nil { + return nil + } + + restartConfig := &staggerrestarts.RestartConfig{} + err = json.Unmarshal(cm.BinaryData["restart-config.yaml"], restartConfig) + Expect(err).NotTo(HaveOccurred()) + + return restartConfig +} diff --git a/main.go b/main.go new file mode 100644 index 00000000..877a1f4f --- /dev/null +++ b/main.go @@ -0,0 +1,198 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package main + +import ( + "path/filepath" + "time" + + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + "github.com/IBM-Blockchain/fabric-operator/pkg/command" + cainit "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca" + ordererinit "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer" + peerinit "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer" + + logf "sigs.k8s.io/controller-runtime/pkg/log" + + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + ctrl "sigs.k8s.io/controller-runtime" + + ibpv1beta1 "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + // +kubebuilder:scaffold:imports +) + +const ( + defaultConfigs = "../../defaultconfig" + defaultPeerDef = "../../definitions/peer" + defaultCADef = "../../definitions/ca" + defaultOrdererDef = "../../definitions/orderer" + defaultConsoleDef = "../../definitions/console" +) + +var log = logf.Log.WithName("cmd") + +var ( + scheme = runtime.NewScheme() + setupLog = ctrl.Log.WithName("setup") +) + +func init() { + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + utilruntime.Must(ibpv1beta1.AddToScheme(scheme)) + // +kubebuilder:scaffold:scheme +} + +func main() { + + operatorCfg := &config.Config{} + + setDefaultCADefinitions(operatorCfg) + setDefaultPeerDefinitions(operatorCfg) + setDefaultOrdererDefinitions(operatorCfg) + setDefaultConsoleDefinitions(operatorCfg) + + operatorCfg.Operator.SetDefaults() + + if err := command.Operator(operatorCfg, true); err != nil { + log.Error(err, "failed to start operator") + time.Sleep(15 * time.Second) + } + + // TODO + // if err = (&ibpca.IBPCAReconciler{ + // Client: mgr.GetClient(), + // Log: ctrl.Log.WithName("controllers").WithName("IBPCA"), + // Scheme: mgr.GetScheme(), + // }).SetupWithManager(mgr); err != nil { + // setupLog.Error(err, "unable to create controller", "controller", "IBPCA") + // os.Exit(1) + // } + // if err = (&controllers.IBPPeerReconciler{ + // Client: mgr.GetClient(), + // Log: ctrl.Log.WithName("controllers").WithName("IBPPeer"), + // Scheme: mgr.GetScheme(), + // }).SetupWithManager(mgr); err != nil { + // setupLog.Error(err, "unable to create controller", "controller", "IBPPeer") + // os.Exit(1) + // } + // if err = (&controllers.IBPOrdererReconciler{ + // Client: mgr.GetClient(), + // Log: ctrl.Log.WithName("controllers").WithName("IBPOrderer"), + // Scheme: mgr.GetScheme(), + // }).SetupWithManager(mgr); err != nil { + // setupLog.Error(err, "unable to create controller", "controller", "IBPOrderer") + // os.Exit(1) + // } + // if err = (&controllers.IBPConsoleReconciler{ + // Client: mgr.GetClient(), + // Log: ctrl.Log.WithName("controllers").WithName("IBPConsole"), + // Scheme: mgr.GetScheme(), + // }).SetupWithManager(mgr); err != nil { + // setupLog.Error(err, "unable to create controller", "controller", "IBPConsole") + // os.Exit(1) + // } + // +kubebuilder:scaffold:builder +} + +func setDefaultCADefinitions(cfg *config.Config) { + cfg.CAInitConfig = &cainit.Config{ + CADefaultConfigPath: filepath.Join(defaultConfigs, "ca/ca.yaml"), + TLSCADefaultConfigPath: filepath.Join(defaultConfigs, "ca/tlsca.yaml"), + DeploymentFile: filepath.Join(defaultCADef, "deployment.yaml"), + PVCFile: filepath.Join(defaultCADef, "pvc.yaml"), + ServiceFile: filepath.Join(defaultCADef, "service.yaml"), + RoleFile: filepath.Join(defaultCADef, "role.yaml"), + 
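+ // These file entries point at the Kubernetes manifest templates under
+ // definitions/ca (and the default CA configs under defaultconfig/ca) that
+ // the operator uses when reconciling an IBPCA instance.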
ServiceAccountFile: filepath.Join(defaultCADef, "serviceaccount.yaml"), + RoleBindingFile: filepath.Join(defaultCADef, "rolebinding.yaml"), + ConfigMapFile: filepath.Join(defaultCADef, "configmap-caoverride.yaml"), + IngressFile: filepath.Join(defaultCADef, "ingress.yaml"), + Ingressv1beta1File: filepath.Join(defaultCADef, "ingressv1beta1.yaml"), + RouteFile: filepath.Join(defaultCADef, "route.yaml"), + SharedPath: "/tmp/data", + } +} + +func setDefaultPeerDefinitions(cfg *config.Config) { + cfg.PeerInitConfig = &peerinit.Config{ + OUFile: filepath.Join(defaultConfigs, "peer/ouconfig.yaml"), + InterOUFile: filepath.Join(defaultConfigs, "peer/ouconfig-inter.yaml"), + CorePeerFile: filepath.Join(defaultConfigs, "peer/core.yaml"), + CorePeerV2File: filepath.Join(defaultConfigs, "peer/v2/core.yaml"), + DeploymentFile: filepath.Join(defaultPeerDef, "deployment.yaml"), + PVCFile: filepath.Join(defaultPeerDef, "pvc.yaml"), + CouchDBPVCFile: filepath.Join(defaultPeerDef, "couchdb-pvc.yaml"), + ServiceFile: filepath.Join(defaultPeerDef, "service.yaml"), + RoleFile: filepath.Join(defaultPeerDef, "role.yaml"), + ServiceAccountFile: filepath.Join(defaultPeerDef, "serviceaccount.yaml"), + RoleBindingFile: filepath.Join(defaultPeerDef, "rolebinding.yaml"), + FluentdConfigMapFile: filepath.Join(defaultPeerDef, "fluentd-configmap.yaml"), + CouchContainerFile: filepath.Join(defaultPeerDef, "couchdb.yaml"), + CouchInitContainerFile: filepath.Join(defaultPeerDef, "couchdb-init.yaml"), + IngressFile: filepath.Join(defaultPeerDef, "ingress.yaml"), + Ingressv1beta1File: filepath.Join(defaultPeerDef, "ingressv1beta1.yaml"), + CCLauncherFile: filepath.Join(defaultPeerDef, "chaincode-launcher.yaml"), + RouteFile: filepath.Join(defaultPeerDef, "route.yaml"), + StoragePath: "/tmp/peerinit", + } +} + +func setDefaultOrdererDefinitions(cfg *config.Config) { + cfg.OrdererInitConfig = &ordererinit.Config{ + OrdererV2File: filepath.Join(defaultConfigs, "orderer/v2/orderer.yaml"), + OrdererV24File: filepath.Join(defaultConfigs, "orderer/v24/orderer.yaml"), + OrdererFile: filepath.Join(defaultConfigs, "orderer/orderer.yaml"), + ConfigTxFile: filepath.Join(defaultConfigs, "orderer/configtx.yaml"), + OUFile: filepath.Join(defaultConfigs, "orderer/ouconfig.yaml"), + InterOUFile: filepath.Join(defaultConfigs, "orderer/ouconfig-inter.yaml"), + DeploymentFile: filepath.Join(defaultOrdererDef, "deployment.yaml"), + PVCFile: filepath.Join(defaultOrdererDef, "pvc.yaml"), + ServiceFile: filepath.Join(defaultOrdererDef, "service.yaml"), + CMFile: filepath.Join(defaultOrdererDef, "configmap.yaml"), + RoleFile: filepath.Join(defaultOrdererDef, "role.yaml"), + ServiceAccountFile: filepath.Join(defaultOrdererDef, "serviceaccount.yaml"), + RoleBindingFile: filepath.Join(defaultOrdererDef, "rolebinding.yaml"), + IngressFile: filepath.Join(defaultOrdererDef, "ingress.yaml"), + Ingressv1beta1File: filepath.Join(defaultOrdererDef, "ingressv1beta1.yaml"), + RouteFile: filepath.Join(defaultOrdererDef, "route.yaml"), + StoragePath: "/tmp/ordererinit", + } +} + +func setDefaultConsoleDefinitions(cfg *config.Config) { + cfg.ConsoleInitConfig = &config.ConsoleConfig{ + DeploymentFile: filepath.Join(defaultConsoleDef, "deployment.yaml"), + PVCFile: filepath.Join(defaultConsoleDef, "pvc.yaml"), + ServiceFile: filepath.Join(defaultConsoleDef, "service.yaml"), + DeployerServiceFile: filepath.Join(defaultConsoleDef, "deployer-service.yaml"), + CMFile: filepath.Join(defaultConsoleDef, "configmap.yaml"), + ConsoleCMFile: filepath.Join(defaultConsoleDef, 
"console-configmap.yaml"), + DeployerCMFile: filepath.Join(defaultConsoleDef, "deployer-configmap.yaml"), + RoleFile: filepath.Join(defaultConsoleDef, "role.yaml"), + ServiceAccountFile: filepath.Join(defaultConsoleDef, "serviceaccount.yaml"), + RoleBindingFile: filepath.Join(defaultConsoleDef, "rolebinding.yaml"), + IngressFile: filepath.Join(defaultConsoleDef, "ingress.yaml"), + Ingressv1beta1File: filepath.Join(defaultConsoleDef, "ingressv1beta1.yaml"), + RouteFile: filepath.Join(defaultConsoleDef, "route.yaml"), + NetworkPolicyIngressFile: filepath.Join(defaultConsoleDef, "networkpolicy-ingress.yaml"), + NetworkPolicyDenyAllFile: filepath.Join(defaultConsoleDef, "networkpolicy-denyall.yaml"), + } +} diff --git a/operatorconfig/config.go b/operatorconfig/config.go new file mode 100644 index 00000000..961a3513 --- /dev/null +++ b/operatorconfig/config.go @@ -0,0 +1,55 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package operatorconfig + +import ( + cainit "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca" + ordererinit "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer" + peerinit "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering" + "github.com/go-logr/logr" +) + +type Config struct { + CAInitConfig *cainit.Config + PeerInitConfig *peerinit.Config + OrdererInitConfig *ordererinit.Config + ConsoleInitConfig *ConsoleConfig + Offering offering.Type + Operator Operator + Logger *logr.Logger +} + +type ConsoleConfig struct { + DeploymentFile string + NetworkPolicyIngressFile string + NetworkPolicyDenyAllFile string + ServiceFile string + DeployerServiceFile string + PVCFile string + CMFile string + ConsoleCMFile string + DeployerCMFile string + RoleFile string + RoleBindingFile string + ServiceAccountFile string + IngressFile string + Ingressv1beta1File string + RouteFile string +} diff --git a/operatorconfig/operator.go b/operatorconfig/operator.go new file mode 100644 index 00000000..2b848d25 --- /dev/null +++ b/operatorconfig/operator.go @@ -0,0 +1,201 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package operatorconfig + +import ( + "context" + + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/deployer" + cainit "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/enroller" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/container" + "github.com/vrischmann/envconfig" + + corev1 "k8s.io/api/core/v1" + k8sclient "sigs.k8s.io/controller-runtime/pkg/client" + + "sigs.k8s.io/yaml" +) + +// Client defines interface for making GET calls to kubernetes API server +type Client interface { + Get(ctx context.Context, key k8sclient.ObjectKey, obj k8sclient.Object) error +} + +// LoadFromConfigMap will read config map and return back operator config built on top by +// updating config values based on environment variables. +func LoadFromConfigMap(nn k8sclient.ObjectKey, key string, client Client, operator *Operator) error { + cm := &corev1.ConfigMap{} + err := client.Get(context.TODO(), nn, cm) + if k8sclient.IgnoreNotFound(err) != nil { + return err + } + + err = load([]byte(cm.Data[key]), operator) + if err != nil { + return err + } + + return nil +} + +func load(config []byte, operator *Operator) error { + // If no config bytes passed, we can assume that a config file (config map) for operator + // does not exist and we can skip the unmarshal step. + if config != nil && len(config) > 0 { + if err := yaml.Unmarshal(config, operator); err != nil { + return err + } + } + + opts := envconfig.Options{ + Prefix: "IBPOPERATOR", + AllOptional: true, + LeaveNil: true, + } + + if err := envconfig.InitWithOptions(operator, opts); err != nil { + return err + } + + return nil +} + +// Operator defines operator configuration parameters +type Operator struct { + Orderer Orderer `json:"orderer" yaml:"orderer"` + Peer Peer `json:"peer" yaml:"peer"` + CA CA `json:"ca" yaml:"ca"` + Console Console `json:"console" yaml:"console"` + Restart Restart `json:"restart" yaml:"restart"` + Versions *deployer.Versions `json:"versions,omitempty" yaml:"versions,omitempty"` + Globals Globals `json:"globals,omitempty" yaml:"globals,omitempty" envconfig:"optional"` + Debug Debug `json:"debug" yaml:"debug"` +} + +// CA defines configurable properties for CA custom resource +type CA struct { + Timeouts CATimeouts `json:"timeouts" yaml:"timeouts"` +} + +// CATimeouts defines timeouts properties that can be configured +type CATimeouts struct { + HSMInitJob cainit.HSMInitJobTimeouts `json:"hsmInitJob" yaml:"hsmInitJob"` +} + +type Orderer struct { + Timeouts OrdererTimeouts `json:"timeouts" yaml:"timeouts"` + Renewals OrdererRenewals `json:"renewals" yaml:"renewals"` + DisableProbes string `json:"disableProbes" yaml:"disableProbes"` +} + +type OrdererTimeouts struct { + SecretPoll common.Duration `json:"secretPollTimeout" yaml:"secretPollTimeout"` + EnrollJob enroller.HSMEnrollJobTimeouts `json:"enrollJob" yaml:"enrollJob"` +} + +type OrdererRenewals struct { + DisableTLScert bool `json:"disableTLScert" yaml:"disableTLScert"` +} + +type Peer struct { + Timeouts PeerTimeouts `json:"timeouts" yaml:"timeouts"` +} + +type PeerTimeouts struct { + DBMigration DBMigrationTimeouts `json:"dbMigration" yaml:"dbMigration"` + EnrollJob enroller.HSMEnrollJobTimeouts `json:"enrollJob" yaml:"enrollJob"` +} + +type DBMigrationTimeouts struct { + CouchDBStartUp common.Duration `json:"couchDBStartUp" yaml:"couchDbStartUp"` + JobStart common.Duration `json:"jobStart" 
yaml:"jobStart"` + JobCompletion common.Duration `json:"jobCompletion" yaml:"jobCompletion"` + ReplicaChange common.Duration `json:"replicaChange" yaml:"replicaChange"` + PodDeletion common.Duration `json:"podDeletion" yaml:"podDeletion"` + PodStart common.Duration `json:"podStart" yaml:"podStart"` +} + +type Restart struct { + WaitTime common.Duration `json:"waitTime" yaml:"waitTime"` + Disable DisableRestart `json:"disable" yaml:"disable"` + Timeout common.Duration `json:"timeout" yaml:"timeout"` +} + +type DisableRestart struct { + Components bool `json:"components" yaml:"components"` +} + +type Globals struct { + SecurityContext *container.SecurityContext `json:"securityContext,omitempty" yaml:"securityContext,omitempty"` + AllowKubernetesEighteen string `json:"allowKubernetesEighteen,omitempty" yaml:"allowKubernetesEighteen,omitempty"` +} + +type Debug struct { + DisableDeploymentChecks string `json:"disableDeploymentChecks,omitempty" yaml:"disableDeploymentChecks,omitempty"` +} + +type Console struct { + ApplyNetworkPolicy string `json:"applyNetworkPolicy" yaml:"applyNetworkPolicy"` +} + +// SetDefaults will set defaults as defined by to the operator configuration settings +func (o *Operator) SetDefaults() { + *o = Operator{ + Orderer: Orderer{ + Timeouts: OrdererTimeouts{ + SecretPoll: common.MustParseDuration("30s"), + EnrollJob: enroller.HSMEnrollJobTimeouts{ + JobStart: common.MustParseDuration("90s"), + JobCompletion: common.MustParseDuration("90s"), + }, + }, + }, + Peer: Peer{ + Timeouts: PeerTimeouts{ + DBMigration: DBMigrationTimeouts{ + CouchDBStartUp: common.MustParseDuration("90s"), + JobStart: common.MustParseDuration("90s"), + JobCompletion: common.MustParseDuration("90s"), + ReplicaChange: common.MustParseDuration("90s"), + PodDeletion: common.MustParseDuration("90s"), + PodStart: common.MustParseDuration("90s"), + }, + EnrollJob: enroller.HSMEnrollJobTimeouts{ + JobStart: common.MustParseDuration("90s"), + JobCompletion: common.MustParseDuration("90s"), + }, + }, + }, + CA: CA{ + Timeouts: CATimeouts{ + HSMInitJob: cainit.HSMInitJobTimeouts{ + JobStart: common.MustParseDuration("90s"), + JobCompletion: common.MustParseDuration("90s"), + }, + }, + }, + Restart: Restart{ + WaitTime: common.MustParseDuration("10m"), + Timeout: common.MustParseDuration("5m"), + }, + Versions: getDefaultVersions(), + } +} diff --git a/operatorconfig/versions.go b/operatorconfig/versions.go new file mode 100644 index 00000000..3069176f --- /dev/null +++ b/operatorconfig/versions.go @@ -0,0 +1,75 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package operatorconfig + +import "github.com/IBM-Blockchain/fabric-operator/pkg/apis/deployer" + +const ( + InitImage = "registry.access.redhat.com/ubi8/ubi-minimal" + LatestTag = "latest" + FabricCAVersion = "1.5.3" + FabricVersion = "2.4.3" +) + +func getDefaultVersions() *deployer.Versions { + return &deployer.Versions{ + CA: map[string]deployer.VersionCA{ + "1.5.3-1": { + Default: true, + Version: "1.5.3-1", + Image: deployer.CAImages{ + CAInitImage: InitImage, + CAInitTag: LatestTag, + CAImage: "hyperledger/fabric-ca", + CATag: FabricCAVersion, + }, + }, + }, + Peer: map[string]deployer.VersionPeer{ + "2.4.3-1": { + Default: true, + Version: "2.4.3-1", + Image: deployer.PeerImages{ + PeerInitImage: InitImage, + PeerInitTag: LatestTag, + PeerImage: "hyperledger/fabric-peer", + PeerTag: FabricVersion, + CouchDBImage: "couchdb", + CouchDBTag: "3.1.2", + GRPCWebImage: "ghcr.io/hyperledger-labs/grpc-web", + GRPCWebTag: LatestTag, + }, + }, + }, + Orderer: map[string]deployer.VersionOrderer{ + "2.4.3-1": { + Default: true, + Version: "2.4.3-1", + Image: deployer.OrdererImages{ + OrdererInitImage: InitImage, + OrdererInitTag: LatestTag, + OrdererImage: "hyperledger/fabric-orderer", + OrdererTag: FabricVersion, + GRPCWebImage: "ghcr.io/hyperledger-labs/grpc-web", + GRPCWebTag: LatestTag, + }, + }, + }, + } +} diff --git a/pkg/action/action.go b/pkg/action/action.go new file mode 100644 index 00000000..7e72e18b --- /dev/null +++ b/pkg/action/action.go @@ -0,0 +1,95 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package action + +import ( + "context" + "fmt" + "time" + + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common" + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var log = logf.Log.WithName("action") + +// By triggering a component restart by updating its annotations instead of deleting +// the deployment, components will be restarted with the rolling update strategy +// unless their deployments specify a recreate strategy. This will allow ibpconsole +// components with rolling update strategies to not have any downtime. 
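+//
+// Illustrative usage (the client, name, and namespace below are placeholders, not values
+// taken from the operator itself):
+//
+//	if err := action.Restart(k8sClient, "ibpca-sample", "fabric-namespace"); err != nil {
+//		log.Error(err, "failed to roll deployment")
+//	}
+//
+// The "kubectl.kubernetes.io/restartedAt" annotation written here is the same annotation
+// set by `kubectl rollout restart`, so the rollout proceeds the same way a manual kubectl
+// restart would.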
+func Restart(client k8sclient.Client, name, namespace string) error { + deployment := &appsv1.Deployment{} + err := client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, deployment) + if err != nil { + return err + } + + if deployment == nil { + return fmt.Errorf("failed to get deployment %s", name) + } + + if deployment.Spec.Template.ObjectMeta.Annotations == nil { + deployment.Spec.Template.ObjectMeta.Annotations = make(map[string]string) + } + deployment.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] = time.Now().Format(time.RFC3339) + + err = client.Patch(context.TODO(), deployment, nil, k8sclient.PatchOption{ + Resilient: &k8sclient.ResilientPatch{ + Retry: 3, + Into: &appsv1.Deployment{}, + Strategy: runtimeclient.MergeFrom, + }, + }) + if err != nil { + return err + } + + return nil +} + +//go:generate counterfeiter -o mocks/reenroller.go -fake-name Reenroller . Reenroller + +type Reenroller interface { + RenewCert(certType common.SecretType, instance runtime.Object, newKey bool) error +} + +//go:generate counterfeiter -o mocks/reenrollinstance.go -fake-name ReenrollInstance . ReenrollInstance + +type ReenrollInstance interface { + runtime.Object + v1.Object + ResetEcertReenroll() + ResetTLSReenroll() +} + +func Reenroll(reenroller Reenroller, client k8sclient.Client, certType common.SecretType, instance ReenrollInstance, newKey bool) error { + err := reenroller.RenewCert(certType, instance, newKey) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/action/action_suite_test.go b/pkg/action/action_suite_test.go new file mode 100644 index 00000000..7f03718b --- /dev/null +++ b/pkg/action/action_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package action_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestAction(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Action Suite") +} diff --git a/pkg/action/action_test.go b/pkg/action/action_test.go new file mode 100644 index 00000000..0226d400 --- /dev/null +++ b/pkg/action/action_test.go @@ -0,0 +1,100 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package action_test + +import ( + "errors" + + . 
"github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + + controllermocks "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + "github.com/IBM-Blockchain/fabric-operator/pkg/action" + "github.com/IBM-Blockchain/fabric-operator/pkg/action/mocks" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common" +) + +var _ = Describe("actions", func() { + var ( + client *controllermocks.Client + ) + + BeforeEach(func() { + client = &controllermocks.Client{} + }) + + Context("restart", func() { + + It("returns an error if failed to get deployment", func() { + client.GetReturns(errors.New("get error")) + err := action.Restart(client, "name", "namespace") + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("get error")) + }) + + It("returns error if fails to patch deployment", func() { + client.PatchReturns(errors.New("patch error")) + err := action.Restart(client, "name", "namespace") + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("patch error")) + }) + + It("restarts deployment by updating annotations", func() { + err := action.Restart(client, "name", "namespace") + Expect(err).NotTo(HaveOccurred()) + _, dep, _, _ := client.PatchArgsForCall(0) + deployment := dep.(*appsv1.Deployment) + annotation := deployment.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] + Expect(annotation).NotTo(Equal("")) + + }) + }) + + Context("reenroll", func() { + var ( + instance *mocks.ReenrollInstance + reenroller *mocks.Reenroller + ) + + BeforeEach(func() { + reenroller = &mocks.Reenroller{} + instance = &mocks.ReenrollInstance{} + }) + + It("returns an error if pod deletion fails", func() { + reenroller.RenewCertReturns(errors.New("renew failed")) + err := action.Reenroll(reenroller, client, common.ECERT, instance, true) + Expect(err).To(HaveOccurred()) + Expect(err).Should(MatchError(ContainSubstring("renew failed"))) + }) + + It("reenrolls ecert successfully", func() { + err := action.Reenroll(reenroller, client, common.ECERT, instance, true) + Expect(err).NotTo(HaveOccurred()) + Expect(reenroller.RenewCertCallCount()).To(Equal(1)) + }) + + It("reenrolls TLS successfully", func() { + err := action.Reenroll(reenroller, client, common.TLS, instance, true) + Expect(err).NotTo(HaveOccurred()) + Expect(reenroller.RenewCertCallCount()).To(Equal(1)) + }) + }) +}) diff --git a/pkg/action/enroll.go b/pkg/action/enroll.go new file mode 100644 index 00000000..9eb7ebc5 --- /dev/null +++ b/pkg/action/enroll.go @@ -0,0 +1,80 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package action + +import ( + "fmt" + "os" + + "github.com/pkg/errors" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/enroller" + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +//go:generate counterfeiter -o mocks/enrollinstance.go -fake-name EnrollInstance . EnrollInstance + +type EnrollInstance interface { + runtime.Object + metav1.Object + IsHSMEnabled() bool + UsingHSMProxy() bool + GetConfigOverride() (interface{}, error) + EnrollerImage() string + GetPullSecrets() []corev1.LocalObjectReference + PVCName() string + GetResource(current.Component) corev1.ResourceRequirements +} + +func Enroll(instance EnrollInstance, enrollment *current.Enrollment, storagePath string, client k8sclient.Client, scheme *runtime.Scheme, ecert bool, timeouts enroller.HSMEnrollJobTimeouts) (*config.Response, error) { + log.Info(fmt.Sprintf("Enroll action performing enrollment for identity: %s", enrollment.EnrollID)) + + var err error + defer os.RemoveAll(storagePath) + + bytes, err := enrollment.GetCATLSBytes() + if err != nil { + return nil, err + } + + caClient := enroller.NewFabCAClient(enrollment, storagePath, nil, bytes) + certEnroller := enroller.New(enroller.NewSWEnroller(caClient)) + + // Only check if HSM enroller is needed if the request is for an ecert, TLS cert enrollment is not supported + // via HSM + if ecert { + certEnroller, err = enroller.Factory(enrollment, client, instance, storagePath, scheme, bytes, timeouts) + if err != nil { + return nil, err + } + } + + crypto, err := config.GenerateCrypto(certEnroller) + if err != nil { + return nil, errors.Wrap(err, "failed to generate crypto") + } + + return crypto, nil +} diff --git a/pkg/action/enroll_test.go b/pkg/action/enroll_test.go new file mode 100644 index 00000000..3d5bbd0f --- /dev/null +++ b/pkg/action/enroll_test.go @@ -0,0 +1,62 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package action_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/runtime" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + controllermocks "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + "github.com/IBM-Blockchain/fabric-operator/pkg/action" + "github.com/IBM-Blockchain/fabric-operator/pkg/action/mocks" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/enroller" +) + +var _ = Describe("enroll", func() { + var ( + instance *mocks.EnrollInstance + enrollment *current.Enrollment + client = &controllermocks.Client{} + ) + + BeforeEach(func() { + instance = &mocks.EnrollInstance{} + enrollment = ¤t.Enrollment{ + CAHost: "cahost", + CAPort: "7054", + EnrollID: "id", + EnrollSecret: "secret", + CATLS: ¤t.CATLS{ + CACert: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNpVENDQWkrZ0F3SUJBZ0lVRkd3N0RjK0QvZUoyY08wOHd6d2tialIzK1M4d0NnWUlLb1pJemowRUF3SXcKYURFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJrd0Z3WURWUVFERXhCbVlXSnlhV010ClkyRXRjMlZ5ZG1WeU1CNFhEVEU1TVRBd09URTBNakF3TUZvWERUSXdNVEF3T0RFME1qQXdNRm93YnpFTE1Ba0cKQTFVRUJoTUNWVk14RnpBVkJnTlZCQWdURGs1dmNuUm9JRU5oY205c2FXNWhNUlF3RWdZRFZRUUtFd3RJZVhCbApjbXhsWkdkbGNqRVBNQTBHQTFVRUN4TUdSbUZpY21sak1TQXdIZ1lEVlFRREV4ZFRZV0ZrY3kxTllXTkNiMjlyCkxWQnlieTVzYjJOaGJEQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJBK0JBRzhZakJvTllabGgKRjFrVHNUbHd6VERDQTJocDhZTXI5Ky8vbEd0NURoSGZVT1c3bkhuSW1USHlPRjJQVjFPcVRuUWhUbWpLYTdaQwpqeU9BUWxLamdhOHdnYXd3RGdZRFZSMFBBUUgvQkFRREFnT29NQjBHQTFVZEpRUVdNQlFHQ0NzR0FRVUZCd01CCkJnZ3JCZ0VGQlFjREFqQU1CZ05WSFJNQkFmOEVBakFBTUIwR0ExVWREZ1FXQkJTbHJjL0lNQkxvMzR0UktvWnEKNTQreDIyYWEyREFmQmdOVkhTTUVHREFXZ0JSWmpxT3RQZWJzSFI2UjBNQUhrNnd4ei85UFZqQXRCZ05WSFJFRQpKakFrZ2hkVFlXRmtjeTFOWVdOQ2IyOXJMVkJ5Ynk1c2IyTmhiSUlKYkc5allXeG9iM04wTUFvR0NDcUdTTTQ5CkJBTUNBMGdBTUVVQ0lRRGR0Y1QwUE9FQXJZKzgwdEhmWUwvcXBiWWoxMGU2eWlPWlpUQ29wY25mUVFJZ1FNQUQKaFc3T0NSUERNd3lqKzNhb015d2hFenFHYy9jRDJSU2V5ekRiRjFFPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==", + }, + } + }) + + Context("enrolls for certificate/key", func() { + It("fails to get crypto when ca is unreachable", func() { + _, err := action.Enroll(instance, enrollment, "../../testdata/tmp", client, &runtime.Scheme{}, true, enroller.HSMEnrollJobTimeouts{}) + Expect(err).To(HaveOccurred()) + Expect(err).To(MatchError(ContainSubstring("failed to generate crypto"))) + }) + }) +}) + +type fakeConfig struct{} diff --git a/pkg/action/mocks/deploymentreset.go b/pkg/action/mocks/deploymentreset.go new file mode 100644 index 00000000..83614cca --- /dev/null +++ b/pkg/action/mocks/deploymentreset.go @@ -0,0 +1,264 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package mocks + +import ( + "sync" + + "github.com/IBM-Blockchain/fabric-operator/pkg/action" + v1 "k8s.io/api/apps/v1" + v1a "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type DeploymentReset struct { + DeploymentStatusStub func(v1a.Object) (v1.DeploymentStatus, error) + deploymentStatusMutex sync.RWMutex + deploymentStatusArgsForCall []struct { + arg1 v1a.Object + } + deploymentStatusReturns struct { + result1 v1.DeploymentStatus + result2 error + } + deploymentStatusReturnsOnCall map[int]struct { + result1 v1.DeploymentStatus + result2 error + } + GetStub func(v1a.Object) (client.Object, error) + getMutex sync.RWMutex + getArgsForCall []struct { + arg1 v1a.Object + } + getReturns struct { + result1 client.Object + result2 error + } + getReturnsOnCall map[int]struct { + result1 client.Object + result2 error + } + GetSchemeStub func() *runtime.Scheme + getSchemeMutex sync.RWMutex + getSchemeArgsForCall []struct { + } + getSchemeReturns struct { + result1 *runtime.Scheme + } + getSchemeReturnsOnCall map[int]struct { + result1 *runtime.Scheme + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *DeploymentReset) DeploymentStatus(arg1 v1a.Object) (v1.DeploymentStatus, error) { + fake.deploymentStatusMutex.Lock() + ret, specificReturn := fake.deploymentStatusReturnsOnCall[len(fake.deploymentStatusArgsForCall)] + fake.deploymentStatusArgsForCall = append(fake.deploymentStatusArgsForCall, struct { + arg1 v1a.Object + }{arg1}) + stub := fake.DeploymentStatusStub + fakeReturns := fake.deploymentStatusReturns + fake.recordInvocation("DeploymentStatus", []interface{}{arg1}) + fake.deploymentStatusMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *DeploymentReset) DeploymentStatusCallCount() int { + fake.deploymentStatusMutex.RLock() + defer fake.deploymentStatusMutex.RUnlock() + return len(fake.deploymentStatusArgsForCall) +} + +func (fake *DeploymentReset) DeploymentStatusCalls(stub func(v1a.Object) (v1.DeploymentStatus, error)) { + fake.deploymentStatusMutex.Lock() + defer fake.deploymentStatusMutex.Unlock() + fake.DeploymentStatusStub = stub +} + +func (fake *DeploymentReset) DeploymentStatusArgsForCall(i int) v1a.Object { + fake.deploymentStatusMutex.RLock() + defer fake.deploymentStatusMutex.RUnlock() + argsForCall := fake.deploymentStatusArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *DeploymentReset) DeploymentStatusReturns(result1 v1.DeploymentStatus, result2 error) { + fake.deploymentStatusMutex.Lock() + defer fake.deploymentStatusMutex.Unlock() + fake.DeploymentStatusStub = nil + fake.deploymentStatusReturns = struct { + result1 v1.DeploymentStatus + result2 error + }{result1, result2} +} + +func (fake *DeploymentReset) DeploymentStatusReturnsOnCall(i int, result1 v1.DeploymentStatus, result2 error) { + fake.deploymentStatusMutex.Lock() + defer fake.deploymentStatusMutex.Unlock() + fake.DeploymentStatusStub = nil + if fake.deploymentStatusReturnsOnCall == nil { + fake.deploymentStatusReturnsOnCall = make(map[int]struct { + result1 v1.DeploymentStatus + result2 error + }) + } + fake.deploymentStatusReturnsOnCall[i] = struct { + result1 v1.DeploymentStatus + result2 error + }{result1, result2} +} + +func (fake *DeploymentReset) Get(arg1 v1a.Object) (client.Object, error) { + fake.getMutex.Lock() + ret, specificReturn := 
fake.getReturnsOnCall[len(fake.getArgsForCall)] + fake.getArgsForCall = append(fake.getArgsForCall, struct { + arg1 v1a.Object + }{arg1}) + stub := fake.GetStub + fakeReturns := fake.getReturns + fake.recordInvocation("Get", []interface{}{arg1}) + fake.getMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *DeploymentReset) GetCallCount() int { + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + return len(fake.getArgsForCall) +} + +func (fake *DeploymentReset) GetCalls(stub func(v1a.Object) (client.Object, error)) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = stub +} + +func (fake *DeploymentReset) GetArgsForCall(i int) v1a.Object { + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + argsForCall := fake.getArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *DeploymentReset) GetReturns(result1 client.Object, result2 error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = nil + fake.getReturns = struct { + result1 client.Object + result2 error + }{result1, result2} +} + +func (fake *DeploymentReset) GetReturnsOnCall(i int, result1 client.Object, result2 error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = nil + if fake.getReturnsOnCall == nil { + fake.getReturnsOnCall = make(map[int]struct { + result1 client.Object + result2 error + }) + } + fake.getReturnsOnCall[i] = struct { + result1 client.Object + result2 error + }{result1, result2} +} + +func (fake *DeploymentReset) GetScheme() *runtime.Scheme { + fake.getSchemeMutex.Lock() + ret, specificReturn := fake.getSchemeReturnsOnCall[len(fake.getSchemeArgsForCall)] + fake.getSchemeArgsForCall = append(fake.getSchemeArgsForCall, struct { + }{}) + stub := fake.GetSchemeStub + fakeReturns := fake.getSchemeReturns + fake.recordInvocation("GetScheme", []interface{}{}) + fake.getSchemeMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *DeploymentReset) GetSchemeCallCount() int { + fake.getSchemeMutex.RLock() + defer fake.getSchemeMutex.RUnlock() + return len(fake.getSchemeArgsForCall) +} + +func (fake *DeploymentReset) GetSchemeCalls(stub func() *runtime.Scheme) { + fake.getSchemeMutex.Lock() + defer fake.getSchemeMutex.Unlock() + fake.GetSchemeStub = stub +} + +func (fake *DeploymentReset) GetSchemeReturns(result1 *runtime.Scheme) { + fake.getSchemeMutex.Lock() + defer fake.getSchemeMutex.Unlock() + fake.GetSchemeStub = nil + fake.getSchemeReturns = struct { + result1 *runtime.Scheme + }{result1} +} + +func (fake *DeploymentReset) GetSchemeReturnsOnCall(i int, result1 *runtime.Scheme) { + fake.getSchemeMutex.Lock() + defer fake.getSchemeMutex.Unlock() + fake.GetSchemeStub = nil + if fake.getSchemeReturnsOnCall == nil { + fake.getSchemeReturnsOnCall = make(map[int]struct { + result1 *runtime.Scheme + }) + } + fake.getSchemeReturnsOnCall[i] = struct { + result1 *runtime.Scheme + }{result1} +} + +func (fake *DeploymentReset) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.deploymentStatusMutex.RLock() + defer fake.deploymentStatusMutex.RUnlock() + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + fake.getSchemeMutex.RLock() + defer fake.getSchemeMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range 
fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *DeploymentReset) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ action.DeploymentReset = new(DeploymentReset) diff --git a/pkg/action/mocks/enrollinstance.go b/pkg/action/mocks/enrollinstance.go new file mode 100644 index 00000000..ea4e370c --- /dev/null +++ b/pkg/action/mocks/enrollinstance.go @@ -0,0 +1,2321 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package mocks + +import ( + "sync" + + "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/action" + v1a "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" +) + +type EnrollInstance struct { + DeepCopyObjectStub func() runtime.Object + deepCopyObjectMutex sync.RWMutex + deepCopyObjectArgsForCall []struct { + } + deepCopyObjectReturns struct { + result1 runtime.Object + } + deepCopyObjectReturnsOnCall map[int]struct { + result1 runtime.Object + } + EnrollerImageStub func() string + enrollerImageMutex sync.RWMutex + enrollerImageArgsForCall []struct { + } + enrollerImageReturns struct { + result1 string + } + enrollerImageReturnsOnCall map[int]struct { + result1 string + } + GetAnnotationsStub func() map[string]string + getAnnotationsMutex sync.RWMutex + getAnnotationsArgsForCall []struct { + } + getAnnotationsReturns struct { + result1 map[string]string + } + getAnnotationsReturnsOnCall map[int]struct { + result1 map[string]string + } + GetClusterNameStub func() string + getClusterNameMutex sync.RWMutex + getClusterNameArgsForCall []struct { + } + getClusterNameReturns struct { + result1 string + } + getClusterNameReturnsOnCall map[int]struct { + result1 string + } + GetConfigOverrideStub func() (interface{}, error) + getConfigOverrideMutex sync.RWMutex + getConfigOverrideArgsForCall []struct { + } + getConfigOverrideReturns struct { + result1 interface{} + result2 error + } + getConfigOverrideReturnsOnCall map[int]struct { + result1 interface{} + result2 error + } + GetCreationTimestampStub func() v1.Time + getCreationTimestampMutex sync.RWMutex + getCreationTimestampArgsForCall []struct { + } + getCreationTimestampReturns struct { + result1 v1.Time + } + getCreationTimestampReturnsOnCall map[int]struct { + result1 v1.Time + } + GetDeletionGracePeriodSecondsStub func() *int64 + getDeletionGracePeriodSecondsMutex sync.RWMutex + getDeletionGracePeriodSecondsArgsForCall []struct { + } + getDeletionGracePeriodSecondsReturns struct { + result1 *int64 + } + getDeletionGracePeriodSecondsReturnsOnCall map[int]struct { + result1 *int64 + } + GetDeletionTimestampStub func() *v1.Time + getDeletionTimestampMutex sync.RWMutex + getDeletionTimestampArgsForCall []struct { + } + getDeletionTimestampReturns struct { + result1 *v1.Time + } + getDeletionTimestampReturnsOnCall map[int]struct { + result1 *v1.Time + } + GetFinalizersStub func() []string + getFinalizersMutex sync.RWMutex + getFinalizersArgsForCall []struct { + } + getFinalizersReturns struct { + result1 []string + } + getFinalizersReturnsOnCall map[int]struct { + result1 []string + } + 
GetGenerateNameStub func() string + getGenerateNameMutex sync.RWMutex + getGenerateNameArgsForCall []struct { + } + getGenerateNameReturns struct { + result1 string + } + getGenerateNameReturnsOnCall map[int]struct { + result1 string + } + GetGenerationStub func() int64 + getGenerationMutex sync.RWMutex + getGenerationArgsForCall []struct { + } + getGenerationReturns struct { + result1 int64 + } + getGenerationReturnsOnCall map[int]struct { + result1 int64 + } + GetLabelsStub func() map[string]string + getLabelsMutex sync.RWMutex + getLabelsArgsForCall []struct { + } + getLabelsReturns struct { + result1 map[string]string + } + getLabelsReturnsOnCall map[int]struct { + result1 map[string]string + } + GetManagedFieldsStub func() []v1.ManagedFieldsEntry + getManagedFieldsMutex sync.RWMutex + getManagedFieldsArgsForCall []struct { + } + getManagedFieldsReturns struct { + result1 []v1.ManagedFieldsEntry + } + getManagedFieldsReturnsOnCall map[int]struct { + result1 []v1.ManagedFieldsEntry + } + GetNameStub func() string + getNameMutex sync.RWMutex + getNameArgsForCall []struct { + } + getNameReturns struct { + result1 string + } + getNameReturnsOnCall map[int]struct { + result1 string + } + GetNamespaceStub func() string + getNamespaceMutex sync.RWMutex + getNamespaceArgsForCall []struct { + } + getNamespaceReturns struct { + result1 string + } + getNamespaceReturnsOnCall map[int]struct { + result1 string + } + GetObjectKindStub func() schema.ObjectKind + getObjectKindMutex sync.RWMutex + getObjectKindArgsForCall []struct { + } + getObjectKindReturns struct { + result1 schema.ObjectKind + } + getObjectKindReturnsOnCall map[int]struct { + result1 schema.ObjectKind + } + GetOwnerReferencesStub func() []v1.OwnerReference + getOwnerReferencesMutex sync.RWMutex + getOwnerReferencesArgsForCall []struct { + } + getOwnerReferencesReturns struct { + result1 []v1.OwnerReference + } + getOwnerReferencesReturnsOnCall map[int]struct { + result1 []v1.OwnerReference + } + GetPullSecretsStub func() []v1a.LocalObjectReference + getPullSecretsMutex sync.RWMutex + getPullSecretsArgsForCall []struct { + } + getPullSecretsReturns struct { + result1 []v1a.LocalObjectReference + } + getPullSecretsReturnsOnCall map[int]struct { + result1 []v1a.LocalObjectReference + } + GetResourceStub func(v1beta1.Component) v1a.ResourceRequirements + getResourceMutex sync.RWMutex + getResourceArgsForCall []struct { + arg1 v1beta1.Component + } + getResourceReturns struct { + result1 v1a.ResourceRequirements + } + getResourceReturnsOnCall map[int]struct { + result1 v1a.ResourceRequirements + } + GetResourceVersionStub func() string + getResourceVersionMutex sync.RWMutex + getResourceVersionArgsForCall []struct { + } + getResourceVersionReturns struct { + result1 string + } + getResourceVersionReturnsOnCall map[int]struct { + result1 string + } + GetSelfLinkStub func() string + getSelfLinkMutex sync.RWMutex + getSelfLinkArgsForCall []struct { + } + getSelfLinkReturns struct { + result1 string + } + getSelfLinkReturnsOnCall map[int]struct { + result1 string + } + GetUIDStub func() types.UID + getUIDMutex sync.RWMutex + getUIDArgsForCall []struct { + } + getUIDReturns struct { + result1 types.UID + } + getUIDReturnsOnCall map[int]struct { + result1 types.UID + } + IsHSMEnabledStub func() bool + isHSMEnabledMutex sync.RWMutex + isHSMEnabledArgsForCall []struct { + } + isHSMEnabledReturns struct { + result1 bool + } + isHSMEnabledReturnsOnCall map[int]struct { + result1 bool + } + PVCNameStub func() string + pVCNameMutex sync.RWMutex + 
pVCNameArgsForCall []struct { + } + pVCNameReturns struct { + result1 string + } + pVCNameReturnsOnCall map[int]struct { + result1 string + } + SetAnnotationsStub func(map[string]string) + setAnnotationsMutex sync.RWMutex + setAnnotationsArgsForCall []struct { + arg1 map[string]string + } + SetClusterNameStub func(string) + setClusterNameMutex sync.RWMutex + setClusterNameArgsForCall []struct { + arg1 string + } + SetCreationTimestampStub func(v1.Time) + setCreationTimestampMutex sync.RWMutex + setCreationTimestampArgsForCall []struct { + arg1 v1.Time + } + SetDeletionGracePeriodSecondsStub func(*int64) + setDeletionGracePeriodSecondsMutex sync.RWMutex + setDeletionGracePeriodSecondsArgsForCall []struct { + arg1 *int64 + } + SetDeletionTimestampStub func(*v1.Time) + setDeletionTimestampMutex sync.RWMutex + setDeletionTimestampArgsForCall []struct { + arg1 *v1.Time + } + SetFinalizersStub func([]string) + setFinalizersMutex sync.RWMutex + setFinalizersArgsForCall []struct { + arg1 []string + } + SetGenerateNameStub func(string) + setGenerateNameMutex sync.RWMutex + setGenerateNameArgsForCall []struct { + arg1 string + } + SetGenerationStub func(int64) + setGenerationMutex sync.RWMutex + setGenerationArgsForCall []struct { + arg1 int64 + } + SetLabelsStub func(map[string]string) + setLabelsMutex sync.RWMutex + setLabelsArgsForCall []struct { + arg1 map[string]string + } + SetManagedFieldsStub func([]v1.ManagedFieldsEntry) + setManagedFieldsMutex sync.RWMutex + setManagedFieldsArgsForCall []struct { + arg1 []v1.ManagedFieldsEntry + } + SetNameStub func(string) + setNameMutex sync.RWMutex + setNameArgsForCall []struct { + arg1 string + } + SetNamespaceStub func(string) + setNamespaceMutex sync.RWMutex + setNamespaceArgsForCall []struct { + arg1 string + } + SetOwnerReferencesStub func([]v1.OwnerReference) + setOwnerReferencesMutex sync.RWMutex + setOwnerReferencesArgsForCall []struct { + arg1 []v1.OwnerReference + } + SetResourceVersionStub func(string) + setResourceVersionMutex sync.RWMutex + setResourceVersionArgsForCall []struct { + arg1 string + } + SetSelfLinkStub func(string) + setSelfLinkMutex sync.RWMutex + setSelfLinkArgsForCall []struct { + arg1 string + } + SetUIDStub func(types.UID) + setUIDMutex sync.RWMutex + setUIDArgsForCall []struct { + arg1 types.UID + } + UsingHSMProxyStub func() bool + usingHSMProxyMutex sync.RWMutex + usingHSMProxyArgsForCall []struct { + } + usingHSMProxyReturns struct { + result1 bool + } + usingHSMProxyReturnsOnCall map[int]struct { + result1 bool + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *EnrollInstance) DeepCopyObject() runtime.Object { + fake.deepCopyObjectMutex.Lock() + ret, specificReturn := fake.deepCopyObjectReturnsOnCall[len(fake.deepCopyObjectArgsForCall)] + fake.deepCopyObjectArgsForCall = append(fake.deepCopyObjectArgsForCall, struct { + }{}) + stub := fake.DeepCopyObjectStub + fakeReturns := fake.deepCopyObjectReturns + fake.recordInvocation("DeepCopyObject", []interface{}{}) + fake.deepCopyObjectMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *EnrollInstance) DeepCopyObjectCallCount() int { + fake.deepCopyObjectMutex.RLock() + defer fake.deepCopyObjectMutex.RUnlock() + return len(fake.deepCopyObjectArgsForCall) +} + +func (fake *EnrollInstance) DeepCopyObjectCalls(stub func() runtime.Object) { + fake.deepCopyObjectMutex.Lock() + defer fake.deepCopyObjectMutex.Unlock() + 
fake.DeepCopyObjectStub = stub +} + +func (fake *EnrollInstance) DeepCopyObjectReturns(result1 runtime.Object) { + fake.deepCopyObjectMutex.Lock() + defer fake.deepCopyObjectMutex.Unlock() + fake.DeepCopyObjectStub = nil + fake.deepCopyObjectReturns = struct { + result1 runtime.Object + }{result1} +} + +func (fake *EnrollInstance) DeepCopyObjectReturnsOnCall(i int, result1 runtime.Object) { + fake.deepCopyObjectMutex.Lock() + defer fake.deepCopyObjectMutex.Unlock() + fake.DeepCopyObjectStub = nil + if fake.deepCopyObjectReturnsOnCall == nil { + fake.deepCopyObjectReturnsOnCall = make(map[int]struct { + result1 runtime.Object + }) + } + fake.deepCopyObjectReturnsOnCall[i] = struct { + result1 runtime.Object + }{result1} +} + +func (fake *EnrollInstance) EnrollerImage() string { + fake.enrollerImageMutex.Lock() + ret, specificReturn := fake.enrollerImageReturnsOnCall[len(fake.enrollerImageArgsForCall)] + fake.enrollerImageArgsForCall = append(fake.enrollerImageArgsForCall, struct { + }{}) + stub := fake.EnrollerImageStub + fakeReturns := fake.enrollerImageReturns + fake.recordInvocation("EnrollerImage", []interface{}{}) + fake.enrollerImageMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *EnrollInstance) EnrollerImageCallCount() int { + fake.enrollerImageMutex.RLock() + defer fake.enrollerImageMutex.RUnlock() + return len(fake.enrollerImageArgsForCall) +} + +func (fake *EnrollInstance) EnrollerImageCalls(stub func() string) { + fake.enrollerImageMutex.Lock() + defer fake.enrollerImageMutex.Unlock() + fake.EnrollerImageStub = stub +} + +func (fake *EnrollInstance) EnrollerImageReturns(result1 string) { + fake.enrollerImageMutex.Lock() + defer fake.enrollerImageMutex.Unlock() + fake.EnrollerImageStub = nil + fake.enrollerImageReturns = struct { + result1 string + }{result1} +} + +func (fake *EnrollInstance) EnrollerImageReturnsOnCall(i int, result1 string) { + fake.enrollerImageMutex.Lock() + defer fake.enrollerImageMutex.Unlock() + fake.EnrollerImageStub = nil + if fake.enrollerImageReturnsOnCall == nil { + fake.enrollerImageReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.enrollerImageReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *EnrollInstance) GetAnnotations() map[string]string { + fake.getAnnotationsMutex.Lock() + ret, specificReturn := fake.getAnnotationsReturnsOnCall[len(fake.getAnnotationsArgsForCall)] + fake.getAnnotationsArgsForCall = append(fake.getAnnotationsArgsForCall, struct { + }{}) + stub := fake.GetAnnotationsStub + fakeReturns := fake.getAnnotationsReturns + fake.recordInvocation("GetAnnotations", []interface{}{}) + fake.getAnnotationsMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *EnrollInstance) GetAnnotationsCallCount() int { + fake.getAnnotationsMutex.RLock() + defer fake.getAnnotationsMutex.RUnlock() + return len(fake.getAnnotationsArgsForCall) +} + +func (fake *EnrollInstance) GetAnnotationsCalls(stub func() map[string]string) { + fake.getAnnotationsMutex.Lock() + defer fake.getAnnotationsMutex.Unlock() + fake.GetAnnotationsStub = stub +} + +func (fake *EnrollInstance) GetAnnotationsReturns(result1 map[string]string) { + fake.getAnnotationsMutex.Lock() + defer fake.getAnnotationsMutex.Unlock() + fake.GetAnnotationsStub = nil + fake.getAnnotationsReturns = struct { + result1 map[string]string + }{result1} +} + +func (fake 
*EnrollInstance) GetAnnotationsReturnsOnCall(i int, result1 map[string]string) { + fake.getAnnotationsMutex.Lock() + defer fake.getAnnotationsMutex.Unlock() + fake.GetAnnotationsStub = nil + if fake.getAnnotationsReturnsOnCall == nil { + fake.getAnnotationsReturnsOnCall = make(map[int]struct { + result1 map[string]string + }) + } + fake.getAnnotationsReturnsOnCall[i] = struct { + result1 map[string]string + }{result1} +} + +func (fake *EnrollInstance) GetClusterName() string { + fake.getClusterNameMutex.Lock() + ret, specificReturn := fake.getClusterNameReturnsOnCall[len(fake.getClusterNameArgsForCall)] + fake.getClusterNameArgsForCall = append(fake.getClusterNameArgsForCall, struct { + }{}) + stub := fake.GetClusterNameStub + fakeReturns := fake.getClusterNameReturns + fake.recordInvocation("GetClusterName", []interface{}{}) + fake.getClusterNameMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *EnrollInstance) GetClusterNameCallCount() int { + fake.getClusterNameMutex.RLock() + defer fake.getClusterNameMutex.RUnlock() + return len(fake.getClusterNameArgsForCall) +} + +func (fake *EnrollInstance) GetClusterNameCalls(stub func() string) { + fake.getClusterNameMutex.Lock() + defer fake.getClusterNameMutex.Unlock() + fake.GetClusterNameStub = stub +} + +func (fake *EnrollInstance) GetClusterNameReturns(result1 string) { + fake.getClusterNameMutex.Lock() + defer fake.getClusterNameMutex.Unlock() + fake.GetClusterNameStub = nil + fake.getClusterNameReturns = struct { + result1 string + }{result1} +} + +func (fake *EnrollInstance) GetClusterNameReturnsOnCall(i int, result1 string) { + fake.getClusterNameMutex.Lock() + defer fake.getClusterNameMutex.Unlock() + fake.GetClusterNameStub = nil + if fake.getClusterNameReturnsOnCall == nil { + fake.getClusterNameReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getClusterNameReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *EnrollInstance) GetConfigOverride() (interface{}, error) { + fake.getConfigOverrideMutex.Lock() + ret, specificReturn := fake.getConfigOverrideReturnsOnCall[len(fake.getConfigOverrideArgsForCall)] + fake.getConfigOverrideArgsForCall = append(fake.getConfigOverrideArgsForCall, struct { + }{}) + stub := fake.GetConfigOverrideStub + fakeReturns := fake.getConfigOverrideReturns + fake.recordInvocation("GetConfigOverride", []interface{}{}) + fake.getConfigOverrideMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *EnrollInstance) GetConfigOverrideCallCount() int { + fake.getConfigOverrideMutex.RLock() + defer fake.getConfigOverrideMutex.RUnlock() + return len(fake.getConfigOverrideArgsForCall) +} + +func (fake *EnrollInstance) GetConfigOverrideCalls(stub func() (interface{}, error)) { + fake.getConfigOverrideMutex.Lock() + defer fake.getConfigOverrideMutex.Unlock() + fake.GetConfigOverrideStub = stub +} + +func (fake *EnrollInstance) GetConfigOverrideReturns(result1 interface{}, result2 error) { + fake.getConfigOverrideMutex.Lock() + defer fake.getConfigOverrideMutex.Unlock() + fake.GetConfigOverrideStub = nil + fake.getConfigOverrideReturns = struct { + result1 interface{} + result2 error + }{result1, result2} +} + +func (fake *EnrollInstance) GetConfigOverrideReturnsOnCall(i int, result1 interface{}, result2 error) { + fake.getConfigOverrideMutex.Lock() + defer 
fake.getConfigOverrideMutex.Unlock() + fake.GetConfigOverrideStub = nil + if fake.getConfigOverrideReturnsOnCall == nil { + fake.getConfigOverrideReturnsOnCall = make(map[int]struct { + result1 interface{} + result2 error + }) + } + fake.getConfigOverrideReturnsOnCall[i] = struct { + result1 interface{} + result2 error + }{result1, result2} +} + +func (fake *EnrollInstance) GetCreationTimestamp() v1.Time { + fake.getCreationTimestampMutex.Lock() + ret, specificReturn := fake.getCreationTimestampReturnsOnCall[len(fake.getCreationTimestampArgsForCall)] + fake.getCreationTimestampArgsForCall = append(fake.getCreationTimestampArgsForCall, struct { + }{}) + stub := fake.GetCreationTimestampStub + fakeReturns := fake.getCreationTimestampReturns + fake.recordInvocation("GetCreationTimestamp", []interface{}{}) + fake.getCreationTimestampMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *EnrollInstance) GetCreationTimestampCallCount() int { + fake.getCreationTimestampMutex.RLock() + defer fake.getCreationTimestampMutex.RUnlock() + return len(fake.getCreationTimestampArgsForCall) +} + +func (fake *EnrollInstance) GetCreationTimestampCalls(stub func() v1.Time) { + fake.getCreationTimestampMutex.Lock() + defer fake.getCreationTimestampMutex.Unlock() + fake.GetCreationTimestampStub = stub +} + +func (fake *EnrollInstance) GetCreationTimestampReturns(result1 v1.Time) { + fake.getCreationTimestampMutex.Lock() + defer fake.getCreationTimestampMutex.Unlock() + fake.GetCreationTimestampStub = nil + fake.getCreationTimestampReturns = struct { + result1 v1.Time + }{result1} +} + +func (fake *EnrollInstance) GetCreationTimestampReturnsOnCall(i int, result1 v1.Time) { + fake.getCreationTimestampMutex.Lock() + defer fake.getCreationTimestampMutex.Unlock() + fake.GetCreationTimestampStub = nil + if fake.getCreationTimestampReturnsOnCall == nil { + fake.getCreationTimestampReturnsOnCall = make(map[int]struct { + result1 v1.Time + }) + } + fake.getCreationTimestampReturnsOnCall[i] = struct { + result1 v1.Time + }{result1} +} + +func (fake *EnrollInstance) GetDeletionGracePeriodSeconds() *int64 { + fake.getDeletionGracePeriodSecondsMutex.Lock() + ret, specificReturn := fake.getDeletionGracePeriodSecondsReturnsOnCall[len(fake.getDeletionGracePeriodSecondsArgsForCall)] + fake.getDeletionGracePeriodSecondsArgsForCall = append(fake.getDeletionGracePeriodSecondsArgsForCall, struct { + }{}) + stub := fake.GetDeletionGracePeriodSecondsStub + fakeReturns := fake.getDeletionGracePeriodSecondsReturns + fake.recordInvocation("GetDeletionGracePeriodSeconds", []interface{}{}) + fake.getDeletionGracePeriodSecondsMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *EnrollInstance) GetDeletionGracePeriodSecondsCallCount() int { + fake.getDeletionGracePeriodSecondsMutex.RLock() + defer fake.getDeletionGracePeriodSecondsMutex.RUnlock() + return len(fake.getDeletionGracePeriodSecondsArgsForCall) +} + +func (fake *EnrollInstance) GetDeletionGracePeriodSecondsCalls(stub func() *int64) { + fake.getDeletionGracePeriodSecondsMutex.Lock() + defer fake.getDeletionGracePeriodSecondsMutex.Unlock() + fake.GetDeletionGracePeriodSecondsStub = stub +} + +func (fake *EnrollInstance) GetDeletionGracePeriodSecondsReturns(result1 *int64) { + fake.getDeletionGracePeriodSecondsMutex.Lock() + defer fake.getDeletionGracePeriodSecondsMutex.Unlock() + 
fake.GetDeletionGracePeriodSecondsStub = nil + fake.getDeletionGracePeriodSecondsReturns = struct { + result1 *int64 + }{result1} +} + +func (fake *EnrollInstance) GetDeletionGracePeriodSecondsReturnsOnCall(i int, result1 *int64) { + fake.getDeletionGracePeriodSecondsMutex.Lock() + defer fake.getDeletionGracePeriodSecondsMutex.Unlock() + fake.GetDeletionGracePeriodSecondsStub = nil + if fake.getDeletionGracePeriodSecondsReturnsOnCall == nil { + fake.getDeletionGracePeriodSecondsReturnsOnCall = make(map[int]struct { + result1 *int64 + }) + } + fake.getDeletionGracePeriodSecondsReturnsOnCall[i] = struct { + result1 *int64 + }{result1} +} + +func (fake *EnrollInstance) GetDeletionTimestamp() *v1.Time { + fake.getDeletionTimestampMutex.Lock() + ret, specificReturn := fake.getDeletionTimestampReturnsOnCall[len(fake.getDeletionTimestampArgsForCall)] + fake.getDeletionTimestampArgsForCall = append(fake.getDeletionTimestampArgsForCall, struct { + }{}) + stub := fake.GetDeletionTimestampStub + fakeReturns := fake.getDeletionTimestampReturns + fake.recordInvocation("GetDeletionTimestamp", []interface{}{}) + fake.getDeletionTimestampMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *EnrollInstance) GetDeletionTimestampCallCount() int { + fake.getDeletionTimestampMutex.RLock() + defer fake.getDeletionTimestampMutex.RUnlock() + return len(fake.getDeletionTimestampArgsForCall) +} + +func (fake *EnrollInstance) GetDeletionTimestampCalls(stub func() *v1.Time) { + fake.getDeletionTimestampMutex.Lock() + defer fake.getDeletionTimestampMutex.Unlock() + fake.GetDeletionTimestampStub = stub +} + +func (fake *EnrollInstance) GetDeletionTimestampReturns(result1 *v1.Time) { + fake.getDeletionTimestampMutex.Lock() + defer fake.getDeletionTimestampMutex.Unlock() + fake.GetDeletionTimestampStub = nil + fake.getDeletionTimestampReturns = struct { + result1 *v1.Time + }{result1} +} + +func (fake *EnrollInstance) GetDeletionTimestampReturnsOnCall(i int, result1 *v1.Time) { + fake.getDeletionTimestampMutex.Lock() + defer fake.getDeletionTimestampMutex.Unlock() + fake.GetDeletionTimestampStub = nil + if fake.getDeletionTimestampReturnsOnCall == nil { + fake.getDeletionTimestampReturnsOnCall = make(map[int]struct { + result1 *v1.Time + }) + } + fake.getDeletionTimestampReturnsOnCall[i] = struct { + result1 *v1.Time + }{result1} +} + +func (fake *EnrollInstance) GetFinalizers() []string { + fake.getFinalizersMutex.Lock() + ret, specificReturn := fake.getFinalizersReturnsOnCall[len(fake.getFinalizersArgsForCall)] + fake.getFinalizersArgsForCall = append(fake.getFinalizersArgsForCall, struct { + }{}) + stub := fake.GetFinalizersStub + fakeReturns := fake.getFinalizersReturns + fake.recordInvocation("GetFinalizers", []interface{}{}) + fake.getFinalizersMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *EnrollInstance) GetFinalizersCallCount() int { + fake.getFinalizersMutex.RLock() + defer fake.getFinalizersMutex.RUnlock() + return len(fake.getFinalizersArgsForCall) +} + +func (fake *EnrollInstance) GetFinalizersCalls(stub func() []string) { + fake.getFinalizersMutex.Lock() + defer fake.getFinalizersMutex.Unlock() + fake.GetFinalizersStub = stub +} + +func (fake *EnrollInstance) GetFinalizersReturns(result1 []string) { + fake.getFinalizersMutex.Lock() + defer fake.getFinalizersMutex.Unlock() + fake.GetFinalizersStub = nil + 
fake.getFinalizersReturns = struct { + result1 []string + }{result1} +} + +func (fake *EnrollInstance) GetFinalizersReturnsOnCall(i int, result1 []string) { + fake.getFinalizersMutex.Lock() + defer fake.getFinalizersMutex.Unlock() + fake.GetFinalizersStub = nil + if fake.getFinalizersReturnsOnCall == nil { + fake.getFinalizersReturnsOnCall = make(map[int]struct { + result1 []string + }) + } + fake.getFinalizersReturnsOnCall[i] = struct { + result1 []string + }{result1} +} + +func (fake *EnrollInstance) GetGenerateName() string { + fake.getGenerateNameMutex.Lock() + ret, specificReturn := fake.getGenerateNameReturnsOnCall[len(fake.getGenerateNameArgsForCall)] + fake.getGenerateNameArgsForCall = append(fake.getGenerateNameArgsForCall, struct { + }{}) + stub := fake.GetGenerateNameStub + fakeReturns := fake.getGenerateNameReturns + fake.recordInvocation("GetGenerateName", []interface{}{}) + fake.getGenerateNameMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *EnrollInstance) GetGenerateNameCallCount() int { + fake.getGenerateNameMutex.RLock() + defer fake.getGenerateNameMutex.RUnlock() + return len(fake.getGenerateNameArgsForCall) +} + +func (fake *EnrollInstance) GetGenerateNameCalls(stub func() string) { + fake.getGenerateNameMutex.Lock() + defer fake.getGenerateNameMutex.Unlock() + fake.GetGenerateNameStub = stub +} + +func (fake *EnrollInstance) GetGenerateNameReturns(result1 string) { + fake.getGenerateNameMutex.Lock() + defer fake.getGenerateNameMutex.Unlock() + fake.GetGenerateNameStub = nil + fake.getGenerateNameReturns = struct { + result1 string + }{result1} +} + +func (fake *EnrollInstance) GetGenerateNameReturnsOnCall(i int, result1 string) { + fake.getGenerateNameMutex.Lock() + defer fake.getGenerateNameMutex.Unlock() + fake.GetGenerateNameStub = nil + if fake.getGenerateNameReturnsOnCall == nil { + fake.getGenerateNameReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getGenerateNameReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *EnrollInstance) GetGeneration() int64 { + fake.getGenerationMutex.Lock() + ret, specificReturn := fake.getGenerationReturnsOnCall[len(fake.getGenerationArgsForCall)] + fake.getGenerationArgsForCall = append(fake.getGenerationArgsForCall, struct { + }{}) + stub := fake.GetGenerationStub + fakeReturns := fake.getGenerationReturns + fake.recordInvocation("GetGeneration", []interface{}{}) + fake.getGenerationMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *EnrollInstance) GetGenerationCallCount() int { + fake.getGenerationMutex.RLock() + defer fake.getGenerationMutex.RUnlock() + return len(fake.getGenerationArgsForCall) +} + +func (fake *EnrollInstance) GetGenerationCalls(stub func() int64) { + fake.getGenerationMutex.Lock() + defer fake.getGenerationMutex.Unlock() + fake.GetGenerationStub = stub +} + +func (fake *EnrollInstance) GetGenerationReturns(result1 int64) { + fake.getGenerationMutex.Lock() + defer fake.getGenerationMutex.Unlock() + fake.GetGenerationStub = nil + fake.getGenerationReturns = struct { + result1 int64 + }{result1} +} + +func (fake *EnrollInstance) GetGenerationReturnsOnCall(i int, result1 int64) { + fake.getGenerationMutex.Lock() + defer fake.getGenerationMutex.Unlock() + fake.GetGenerationStub = nil + if fake.getGenerationReturnsOnCall == nil { + fake.getGenerationReturnsOnCall = 
make(map[int]struct { + result1 int64 + }) + } + fake.getGenerationReturnsOnCall[i] = struct { + result1 int64 + }{result1} +} + +func (fake *EnrollInstance) GetLabels() map[string]string { + fake.getLabelsMutex.Lock() + ret, specificReturn := fake.getLabelsReturnsOnCall[len(fake.getLabelsArgsForCall)] + fake.getLabelsArgsForCall = append(fake.getLabelsArgsForCall, struct { + }{}) + stub := fake.GetLabelsStub + fakeReturns := fake.getLabelsReturns + fake.recordInvocation("GetLabels", []interface{}{}) + fake.getLabelsMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *EnrollInstance) GetLabelsCallCount() int { + fake.getLabelsMutex.RLock() + defer fake.getLabelsMutex.RUnlock() + return len(fake.getLabelsArgsForCall) +} + +func (fake *EnrollInstance) GetLabelsCalls(stub func() map[string]string) { + fake.getLabelsMutex.Lock() + defer fake.getLabelsMutex.Unlock() + fake.GetLabelsStub = stub +} + +func (fake *EnrollInstance) GetLabelsReturns(result1 map[string]string) { + fake.getLabelsMutex.Lock() + defer fake.getLabelsMutex.Unlock() + fake.GetLabelsStub = nil + fake.getLabelsReturns = struct { + result1 map[string]string + }{result1} +} + +func (fake *EnrollInstance) GetLabelsReturnsOnCall(i int, result1 map[string]string) { + fake.getLabelsMutex.Lock() + defer fake.getLabelsMutex.Unlock() + fake.GetLabelsStub = nil + if fake.getLabelsReturnsOnCall == nil { + fake.getLabelsReturnsOnCall = make(map[int]struct { + result1 map[string]string + }) + } + fake.getLabelsReturnsOnCall[i] = struct { + result1 map[string]string + }{result1} +} + +func (fake *EnrollInstance) GetManagedFields() []v1.ManagedFieldsEntry { + fake.getManagedFieldsMutex.Lock() + ret, specificReturn := fake.getManagedFieldsReturnsOnCall[len(fake.getManagedFieldsArgsForCall)] + fake.getManagedFieldsArgsForCall = append(fake.getManagedFieldsArgsForCall, struct { + }{}) + stub := fake.GetManagedFieldsStub + fakeReturns := fake.getManagedFieldsReturns + fake.recordInvocation("GetManagedFields", []interface{}{}) + fake.getManagedFieldsMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *EnrollInstance) GetManagedFieldsCallCount() int { + fake.getManagedFieldsMutex.RLock() + defer fake.getManagedFieldsMutex.RUnlock() + return len(fake.getManagedFieldsArgsForCall) +} + +func (fake *EnrollInstance) GetManagedFieldsCalls(stub func() []v1.ManagedFieldsEntry) { + fake.getManagedFieldsMutex.Lock() + defer fake.getManagedFieldsMutex.Unlock() + fake.GetManagedFieldsStub = stub +} + +func (fake *EnrollInstance) GetManagedFieldsReturns(result1 []v1.ManagedFieldsEntry) { + fake.getManagedFieldsMutex.Lock() + defer fake.getManagedFieldsMutex.Unlock() + fake.GetManagedFieldsStub = nil + fake.getManagedFieldsReturns = struct { + result1 []v1.ManagedFieldsEntry + }{result1} +} + +func (fake *EnrollInstance) GetManagedFieldsReturnsOnCall(i int, result1 []v1.ManagedFieldsEntry) { + fake.getManagedFieldsMutex.Lock() + defer fake.getManagedFieldsMutex.Unlock() + fake.GetManagedFieldsStub = nil + if fake.getManagedFieldsReturnsOnCall == nil { + fake.getManagedFieldsReturnsOnCall = make(map[int]struct { + result1 []v1.ManagedFieldsEntry + }) + } + fake.getManagedFieldsReturnsOnCall[i] = struct { + result1 []v1.ManagedFieldsEntry + }{result1} +} + +func (fake *EnrollInstance) GetName() string { + fake.getNameMutex.Lock() + ret, specificReturn := 
fake.getNameReturnsOnCall[len(fake.getNameArgsForCall)] + fake.getNameArgsForCall = append(fake.getNameArgsForCall, struct { + }{}) + stub := fake.GetNameStub + fakeReturns := fake.getNameReturns + fake.recordInvocation("GetName", []interface{}{}) + fake.getNameMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *EnrollInstance) GetNameCallCount() int { + fake.getNameMutex.RLock() + defer fake.getNameMutex.RUnlock() + return len(fake.getNameArgsForCall) +} + +func (fake *EnrollInstance) GetNameCalls(stub func() string) { + fake.getNameMutex.Lock() + defer fake.getNameMutex.Unlock() + fake.GetNameStub = stub +} + +func (fake *EnrollInstance) GetNameReturns(result1 string) { + fake.getNameMutex.Lock() + defer fake.getNameMutex.Unlock() + fake.GetNameStub = nil + fake.getNameReturns = struct { + result1 string + }{result1} +} + +func (fake *EnrollInstance) GetNameReturnsOnCall(i int, result1 string) { + fake.getNameMutex.Lock() + defer fake.getNameMutex.Unlock() + fake.GetNameStub = nil + if fake.getNameReturnsOnCall == nil { + fake.getNameReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getNameReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *EnrollInstance) GetNamespace() string { + fake.getNamespaceMutex.Lock() + ret, specificReturn := fake.getNamespaceReturnsOnCall[len(fake.getNamespaceArgsForCall)] + fake.getNamespaceArgsForCall = append(fake.getNamespaceArgsForCall, struct { + }{}) + stub := fake.GetNamespaceStub + fakeReturns := fake.getNamespaceReturns + fake.recordInvocation("GetNamespace", []interface{}{}) + fake.getNamespaceMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *EnrollInstance) GetNamespaceCallCount() int { + fake.getNamespaceMutex.RLock() + defer fake.getNamespaceMutex.RUnlock() + return len(fake.getNamespaceArgsForCall) +} + +func (fake *EnrollInstance) GetNamespaceCalls(stub func() string) { + fake.getNamespaceMutex.Lock() + defer fake.getNamespaceMutex.Unlock() + fake.GetNamespaceStub = stub +} + +func (fake *EnrollInstance) GetNamespaceReturns(result1 string) { + fake.getNamespaceMutex.Lock() + defer fake.getNamespaceMutex.Unlock() + fake.GetNamespaceStub = nil + fake.getNamespaceReturns = struct { + result1 string + }{result1} +} + +func (fake *EnrollInstance) GetNamespaceReturnsOnCall(i int, result1 string) { + fake.getNamespaceMutex.Lock() + defer fake.getNamespaceMutex.Unlock() + fake.GetNamespaceStub = nil + if fake.getNamespaceReturnsOnCall == nil { + fake.getNamespaceReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getNamespaceReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *EnrollInstance) GetObjectKind() schema.ObjectKind { + fake.getObjectKindMutex.Lock() + ret, specificReturn := fake.getObjectKindReturnsOnCall[len(fake.getObjectKindArgsForCall)] + fake.getObjectKindArgsForCall = append(fake.getObjectKindArgsForCall, struct { + }{}) + stub := fake.GetObjectKindStub + fakeReturns := fake.getObjectKindReturns + fake.recordInvocation("GetObjectKind", []interface{}{}) + fake.getObjectKindMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *EnrollInstance) GetObjectKindCallCount() int { + fake.getObjectKindMutex.RLock() + defer fake.getObjectKindMutex.RUnlock() + return 
len(fake.getObjectKindArgsForCall) +} + +func (fake *EnrollInstance) GetObjectKindCalls(stub func() schema.ObjectKind) { + fake.getObjectKindMutex.Lock() + defer fake.getObjectKindMutex.Unlock() + fake.GetObjectKindStub = stub +} + +func (fake *EnrollInstance) GetObjectKindReturns(result1 schema.ObjectKind) { + fake.getObjectKindMutex.Lock() + defer fake.getObjectKindMutex.Unlock() + fake.GetObjectKindStub = nil + fake.getObjectKindReturns = struct { + result1 schema.ObjectKind + }{result1} +} + +func (fake *EnrollInstance) GetObjectKindReturnsOnCall(i int, result1 schema.ObjectKind) { + fake.getObjectKindMutex.Lock() + defer fake.getObjectKindMutex.Unlock() + fake.GetObjectKindStub = nil + if fake.getObjectKindReturnsOnCall == nil { + fake.getObjectKindReturnsOnCall = make(map[int]struct { + result1 schema.ObjectKind + }) + } + fake.getObjectKindReturnsOnCall[i] = struct { + result1 schema.ObjectKind + }{result1} +} + +func (fake *EnrollInstance) GetOwnerReferences() []v1.OwnerReference { + fake.getOwnerReferencesMutex.Lock() + ret, specificReturn := fake.getOwnerReferencesReturnsOnCall[len(fake.getOwnerReferencesArgsForCall)] + fake.getOwnerReferencesArgsForCall = append(fake.getOwnerReferencesArgsForCall, struct { + }{}) + stub := fake.GetOwnerReferencesStub + fakeReturns := fake.getOwnerReferencesReturns + fake.recordInvocation("GetOwnerReferences", []interface{}{}) + fake.getOwnerReferencesMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *EnrollInstance) GetOwnerReferencesCallCount() int { + fake.getOwnerReferencesMutex.RLock() + defer fake.getOwnerReferencesMutex.RUnlock() + return len(fake.getOwnerReferencesArgsForCall) +} + +func (fake *EnrollInstance) GetOwnerReferencesCalls(stub func() []v1.OwnerReference) { + fake.getOwnerReferencesMutex.Lock() + defer fake.getOwnerReferencesMutex.Unlock() + fake.GetOwnerReferencesStub = stub +} + +func (fake *EnrollInstance) GetOwnerReferencesReturns(result1 []v1.OwnerReference) { + fake.getOwnerReferencesMutex.Lock() + defer fake.getOwnerReferencesMutex.Unlock() + fake.GetOwnerReferencesStub = nil + fake.getOwnerReferencesReturns = struct { + result1 []v1.OwnerReference + }{result1} +} + +func (fake *EnrollInstance) GetOwnerReferencesReturnsOnCall(i int, result1 []v1.OwnerReference) { + fake.getOwnerReferencesMutex.Lock() + defer fake.getOwnerReferencesMutex.Unlock() + fake.GetOwnerReferencesStub = nil + if fake.getOwnerReferencesReturnsOnCall == nil { + fake.getOwnerReferencesReturnsOnCall = make(map[int]struct { + result1 []v1.OwnerReference + }) + } + fake.getOwnerReferencesReturnsOnCall[i] = struct { + result1 []v1.OwnerReference + }{result1} +} + +func (fake *EnrollInstance) GetPullSecrets() []v1a.LocalObjectReference { + fake.getPullSecretsMutex.Lock() + ret, specificReturn := fake.getPullSecretsReturnsOnCall[len(fake.getPullSecretsArgsForCall)] + fake.getPullSecretsArgsForCall = append(fake.getPullSecretsArgsForCall, struct { + }{}) + stub := fake.GetPullSecretsStub + fakeReturns := fake.getPullSecretsReturns + fake.recordInvocation("GetPullSecrets", []interface{}{}) + fake.getPullSecretsMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *EnrollInstance) GetPullSecretsCallCount() int { + fake.getPullSecretsMutex.RLock() + defer fake.getPullSecretsMutex.RUnlock() + return len(fake.getPullSecretsArgsForCall) +} + +func (fake *EnrollInstance) 
GetPullSecretsCalls(stub func() []v1a.LocalObjectReference) { + fake.getPullSecretsMutex.Lock() + defer fake.getPullSecretsMutex.Unlock() + fake.GetPullSecretsStub = stub +} + +func (fake *EnrollInstance) GetPullSecretsReturns(result1 []v1a.LocalObjectReference) { + fake.getPullSecretsMutex.Lock() + defer fake.getPullSecretsMutex.Unlock() + fake.GetPullSecretsStub = nil + fake.getPullSecretsReturns = struct { + result1 []v1a.LocalObjectReference + }{result1} +} + +func (fake *EnrollInstance) GetPullSecretsReturnsOnCall(i int, result1 []v1a.LocalObjectReference) { + fake.getPullSecretsMutex.Lock() + defer fake.getPullSecretsMutex.Unlock() + fake.GetPullSecretsStub = nil + if fake.getPullSecretsReturnsOnCall == nil { + fake.getPullSecretsReturnsOnCall = make(map[int]struct { + result1 []v1a.LocalObjectReference + }) + } + fake.getPullSecretsReturnsOnCall[i] = struct { + result1 []v1a.LocalObjectReference + }{result1} +} + +func (fake *EnrollInstance) GetResource(arg1 v1beta1.Component) v1a.ResourceRequirements { + fake.getResourceMutex.Lock() + ret, specificReturn := fake.getResourceReturnsOnCall[len(fake.getResourceArgsForCall)] + fake.getResourceArgsForCall = append(fake.getResourceArgsForCall, struct { + arg1 v1beta1.Component + }{arg1}) + stub := fake.GetResourceStub + fakeReturns := fake.getResourceReturns + fake.recordInvocation("GetResource", []interface{}{arg1}) + fake.getResourceMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *EnrollInstance) GetResourceCallCount() int { + fake.getResourceMutex.RLock() + defer fake.getResourceMutex.RUnlock() + return len(fake.getResourceArgsForCall) +} + +func (fake *EnrollInstance) GetResourceCalls(stub func(v1beta1.Component) v1a.ResourceRequirements) { + fake.getResourceMutex.Lock() + defer fake.getResourceMutex.Unlock() + fake.GetResourceStub = stub +} + +func (fake *EnrollInstance) GetResourceArgsForCall(i int) v1beta1.Component { + fake.getResourceMutex.RLock() + defer fake.getResourceMutex.RUnlock() + argsForCall := fake.getResourceArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *EnrollInstance) GetResourceReturns(result1 v1a.ResourceRequirements) { + fake.getResourceMutex.Lock() + defer fake.getResourceMutex.Unlock() + fake.GetResourceStub = nil + fake.getResourceReturns = struct { + result1 v1a.ResourceRequirements + }{result1} +} + +func (fake *EnrollInstance) GetResourceReturnsOnCall(i int, result1 v1a.ResourceRequirements) { + fake.getResourceMutex.Lock() + defer fake.getResourceMutex.Unlock() + fake.GetResourceStub = nil + if fake.getResourceReturnsOnCall == nil { + fake.getResourceReturnsOnCall = make(map[int]struct { + result1 v1a.ResourceRequirements + }) + } + fake.getResourceReturnsOnCall[i] = struct { + result1 v1a.ResourceRequirements + }{result1} +} + +func (fake *EnrollInstance) GetResourceVersion() string { + fake.getResourceVersionMutex.Lock() + ret, specificReturn := fake.getResourceVersionReturnsOnCall[len(fake.getResourceVersionArgsForCall)] + fake.getResourceVersionArgsForCall = append(fake.getResourceVersionArgsForCall, struct { + }{}) + stub := fake.GetResourceVersionStub + fakeReturns := fake.getResourceVersionReturns + fake.recordInvocation("GetResourceVersion", []interface{}{}) + fake.getResourceVersionMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *EnrollInstance) GetResourceVersionCallCount() int { + 
fake.getResourceVersionMutex.RLock() + defer fake.getResourceVersionMutex.RUnlock() + return len(fake.getResourceVersionArgsForCall) +} + +func (fake *EnrollInstance) GetResourceVersionCalls(stub func() string) { + fake.getResourceVersionMutex.Lock() + defer fake.getResourceVersionMutex.Unlock() + fake.GetResourceVersionStub = stub +} + +func (fake *EnrollInstance) GetResourceVersionReturns(result1 string) { + fake.getResourceVersionMutex.Lock() + defer fake.getResourceVersionMutex.Unlock() + fake.GetResourceVersionStub = nil + fake.getResourceVersionReturns = struct { + result1 string + }{result1} +} + +func (fake *EnrollInstance) GetResourceVersionReturnsOnCall(i int, result1 string) { + fake.getResourceVersionMutex.Lock() + defer fake.getResourceVersionMutex.Unlock() + fake.GetResourceVersionStub = nil + if fake.getResourceVersionReturnsOnCall == nil { + fake.getResourceVersionReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getResourceVersionReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *EnrollInstance) GetSelfLink() string { + fake.getSelfLinkMutex.Lock() + ret, specificReturn := fake.getSelfLinkReturnsOnCall[len(fake.getSelfLinkArgsForCall)] + fake.getSelfLinkArgsForCall = append(fake.getSelfLinkArgsForCall, struct { + }{}) + stub := fake.GetSelfLinkStub + fakeReturns := fake.getSelfLinkReturns + fake.recordInvocation("GetSelfLink", []interface{}{}) + fake.getSelfLinkMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *EnrollInstance) GetSelfLinkCallCount() int { + fake.getSelfLinkMutex.RLock() + defer fake.getSelfLinkMutex.RUnlock() + return len(fake.getSelfLinkArgsForCall) +} + +func (fake *EnrollInstance) GetSelfLinkCalls(stub func() string) { + fake.getSelfLinkMutex.Lock() + defer fake.getSelfLinkMutex.Unlock() + fake.GetSelfLinkStub = stub +} + +func (fake *EnrollInstance) GetSelfLinkReturns(result1 string) { + fake.getSelfLinkMutex.Lock() + defer fake.getSelfLinkMutex.Unlock() + fake.GetSelfLinkStub = nil + fake.getSelfLinkReturns = struct { + result1 string + }{result1} +} + +func (fake *EnrollInstance) GetSelfLinkReturnsOnCall(i int, result1 string) { + fake.getSelfLinkMutex.Lock() + defer fake.getSelfLinkMutex.Unlock() + fake.GetSelfLinkStub = nil + if fake.getSelfLinkReturnsOnCall == nil { + fake.getSelfLinkReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getSelfLinkReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *EnrollInstance) GetUID() types.UID { + fake.getUIDMutex.Lock() + ret, specificReturn := fake.getUIDReturnsOnCall[len(fake.getUIDArgsForCall)] + fake.getUIDArgsForCall = append(fake.getUIDArgsForCall, struct { + }{}) + stub := fake.GetUIDStub + fakeReturns := fake.getUIDReturns + fake.recordInvocation("GetUID", []interface{}{}) + fake.getUIDMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *EnrollInstance) GetUIDCallCount() int { + fake.getUIDMutex.RLock() + defer fake.getUIDMutex.RUnlock() + return len(fake.getUIDArgsForCall) +} + +func (fake *EnrollInstance) GetUIDCalls(stub func() types.UID) { + fake.getUIDMutex.Lock() + defer fake.getUIDMutex.Unlock() + fake.GetUIDStub = stub +} + +func (fake *EnrollInstance) GetUIDReturns(result1 types.UID) { + fake.getUIDMutex.Lock() + defer fake.getUIDMutex.Unlock() + fake.GetUIDStub = nil + fake.getUIDReturns = struct { + result1 
types.UID + }{result1} +} + +func (fake *EnrollInstance) GetUIDReturnsOnCall(i int, result1 types.UID) { + fake.getUIDMutex.Lock() + defer fake.getUIDMutex.Unlock() + fake.GetUIDStub = nil + if fake.getUIDReturnsOnCall == nil { + fake.getUIDReturnsOnCall = make(map[int]struct { + result1 types.UID + }) + } + fake.getUIDReturnsOnCall[i] = struct { + result1 types.UID + }{result1} +} + +func (fake *EnrollInstance) IsHSMEnabled() bool { + fake.isHSMEnabledMutex.Lock() + ret, specificReturn := fake.isHSMEnabledReturnsOnCall[len(fake.isHSMEnabledArgsForCall)] + fake.isHSMEnabledArgsForCall = append(fake.isHSMEnabledArgsForCall, struct { + }{}) + stub := fake.IsHSMEnabledStub + fakeReturns := fake.isHSMEnabledReturns + fake.recordInvocation("IsHSMEnabled", []interface{}{}) + fake.isHSMEnabledMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *EnrollInstance) IsHSMEnabledCallCount() int { + fake.isHSMEnabledMutex.RLock() + defer fake.isHSMEnabledMutex.RUnlock() + return len(fake.isHSMEnabledArgsForCall) +} + +func (fake *EnrollInstance) IsHSMEnabledCalls(stub func() bool) { + fake.isHSMEnabledMutex.Lock() + defer fake.isHSMEnabledMutex.Unlock() + fake.IsHSMEnabledStub = stub +} + +func (fake *EnrollInstance) IsHSMEnabledReturns(result1 bool) { + fake.isHSMEnabledMutex.Lock() + defer fake.isHSMEnabledMutex.Unlock() + fake.IsHSMEnabledStub = nil + fake.isHSMEnabledReturns = struct { + result1 bool + }{result1} +} + +func (fake *EnrollInstance) IsHSMEnabledReturnsOnCall(i int, result1 bool) { + fake.isHSMEnabledMutex.Lock() + defer fake.isHSMEnabledMutex.Unlock() + fake.IsHSMEnabledStub = nil + if fake.isHSMEnabledReturnsOnCall == nil { + fake.isHSMEnabledReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.isHSMEnabledReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *EnrollInstance) PVCName() string { + fake.pVCNameMutex.Lock() + ret, specificReturn := fake.pVCNameReturnsOnCall[len(fake.pVCNameArgsForCall)] + fake.pVCNameArgsForCall = append(fake.pVCNameArgsForCall, struct { + }{}) + stub := fake.PVCNameStub + fakeReturns := fake.pVCNameReturns + fake.recordInvocation("PVCName", []interface{}{}) + fake.pVCNameMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *EnrollInstance) PVCNameCallCount() int { + fake.pVCNameMutex.RLock() + defer fake.pVCNameMutex.RUnlock() + return len(fake.pVCNameArgsForCall) +} + +func (fake *EnrollInstance) PVCNameCalls(stub func() string) { + fake.pVCNameMutex.Lock() + defer fake.pVCNameMutex.Unlock() + fake.PVCNameStub = stub +} + +func (fake *EnrollInstance) PVCNameReturns(result1 string) { + fake.pVCNameMutex.Lock() + defer fake.pVCNameMutex.Unlock() + fake.PVCNameStub = nil + fake.pVCNameReturns = struct { + result1 string + }{result1} +} + +func (fake *EnrollInstance) PVCNameReturnsOnCall(i int, result1 string) { + fake.pVCNameMutex.Lock() + defer fake.pVCNameMutex.Unlock() + fake.PVCNameStub = nil + if fake.pVCNameReturnsOnCall == nil { + fake.pVCNameReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.pVCNameReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *EnrollInstance) SetAnnotations(arg1 map[string]string) { + fake.setAnnotationsMutex.Lock() + fake.setAnnotationsArgsForCall = append(fake.setAnnotationsArgsForCall, struct { + arg1 map[string]string + }{arg1}) + stub := 
fake.SetAnnotationsStub + fake.recordInvocation("SetAnnotations", []interface{}{arg1}) + fake.setAnnotationsMutex.Unlock() + if stub != nil { + fake.SetAnnotationsStub(arg1) + } +} + +func (fake *EnrollInstance) SetAnnotationsCallCount() int { + fake.setAnnotationsMutex.RLock() + defer fake.setAnnotationsMutex.RUnlock() + return len(fake.setAnnotationsArgsForCall) +} + +func (fake *EnrollInstance) SetAnnotationsCalls(stub func(map[string]string)) { + fake.setAnnotationsMutex.Lock() + defer fake.setAnnotationsMutex.Unlock() + fake.SetAnnotationsStub = stub +} + +func (fake *EnrollInstance) SetAnnotationsArgsForCall(i int) map[string]string { + fake.setAnnotationsMutex.RLock() + defer fake.setAnnotationsMutex.RUnlock() + argsForCall := fake.setAnnotationsArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *EnrollInstance) SetClusterName(arg1 string) { + fake.setClusterNameMutex.Lock() + fake.setClusterNameArgsForCall = append(fake.setClusterNameArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetClusterNameStub + fake.recordInvocation("SetClusterName", []interface{}{arg1}) + fake.setClusterNameMutex.Unlock() + if stub != nil { + fake.SetClusterNameStub(arg1) + } +} + +func (fake *EnrollInstance) SetClusterNameCallCount() int { + fake.setClusterNameMutex.RLock() + defer fake.setClusterNameMutex.RUnlock() + return len(fake.setClusterNameArgsForCall) +} + +func (fake *EnrollInstance) SetClusterNameCalls(stub func(string)) { + fake.setClusterNameMutex.Lock() + defer fake.setClusterNameMutex.Unlock() + fake.SetClusterNameStub = stub +} + +func (fake *EnrollInstance) SetClusterNameArgsForCall(i int) string { + fake.setClusterNameMutex.RLock() + defer fake.setClusterNameMutex.RUnlock() + argsForCall := fake.setClusterNameArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *EnrollInstance) SetCreationTimestamp(arg1 v1.Time) { + fake.setCreationTimestampMutex.Lock() + fake.setCreationTimestampArgsForCall = append(fake.setCreationTimestampArgsForCall, struct { + arg1 v1.Time + }{arg1}) + stub := fake.SetCreationTimestampStub + fake.recordInvocation("SetCreationTimestamp", []interface{}{arg1}) + fake.setCreationTimestampMutex.Unlock() + if stub != nil { + fake.SetCreationTimestampStub(arg1) + } +} + +func (fake *EnrollInstance) SetCreationTimestampCallCount() int { + fake.setCreationTimestampMutex.RLock() + defer fake.setCreationTimestampMutex.RUnlock() + return len(fake.setCreationTimestampArgsForCall) +} + +func (fake *EnrollInstance) SetCreationTimestampCalls(stub func(v1.Time)) { + fake.setCreationTimestampMutex.Lock() + defer fake.setCreationTimestampMutex.Unlock() + fake.SetCreationTimestampStub = stub +} + +func (fake *EnrollInstance) SetCreationTimestampArgsForCall(i int) v1.Time { + fake.setCreationTimestampMutex.RLock() + defer fake.setCreationTimestampMutex.RUnlock() + argsForCall := fake.setCreationTimestampArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *EnrollInstance) SetDeletionGracePeriodSeconds(arg1 *int64) { + fake.setDeletionGracePeriodSecondsMutex.Lock() + fake.setDeletionGracePeriodSecondsArgsForCall = append(fake.setDeletionGracePeriodSecondsArgsForCall, struct { + arg1 *int64 + }{arg1}) + stub := fake.SetDeletionGracePeriodSecondsStub + fake.recordInvocation("SetDeletionGracePeriodSeconds", []interface{}{arg1}) + fake.setDeletionGracePeriodSecondsMutex.Unlock() + if stub != nil { + fake.SetDeletionGracePeriodSecondsStub(arg1) + } +} + +func (fake *EnrollInstance) SetDeletionGracePeriodSecondsCallCount() int { + 
fake.setDeletionGracePeriodSecondsMutex.RLock() + defer fake.setDeletionGracePeriodSecondsMutex.RUnlock() + return len(fake.setDeletionGracePeriodSecondsArgsForCall) +} + +func (fake *EnrollInstance) SetDeletionGracePeriodSecondsCalls(stub func(*int64)) { + fake.setDeletionGracePeriodSecondsMutex.Lock() + defer fake.setDeletionGracePeriodSecondsMutex.Unlock() + fake.SetDeletionGracePeriodSecondsStub = stub +} + +func (fake *EnrollInstance) SetDeletionGracePeriodSecondsArgsForCall(i int) *int64 { + fake.setDeletionGracePeriodSecondsMutex.RLock() + defer fake.setDeletionGracePeriodSecondsMutex.RUnlock() + argsForCall := fake.setDeletionGracePeriodSecondsArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *EnrollInstance) SetDeletionTimestamp(arg1 *v1.Time) { + fake.setDeletionTimestampMutex.Lock() + fake.setDeletionTimestampArgsForCall = append(fake.setDeletionTimestampArgsForCall, struct { + arg1 *v1.Time + }{arg1}) + stub := fake.SetDeletionTimestampStub + fake.recordInvocation("SetDeletionTimestamp", []interface{}{arg1}) + fake.setDeletionTimestampMutex.Unlock() + if stub != nil { + fake.SetDeletionTimestampStub(arg1) + } +} + +func (fake *EnrollInstance) SetDeletionTimestampCallCount() int { + fake.setDeletionTimestampMutex.RLock() + defer fake.setDeletionTimestampMutex.RUnlock() + return len(fake.setDeletionTimestampArgsForCall) +} + +func (fake *EnrollInstance) SetDeletionTimestampCalls(stub func(*v1.Time)) { + fake.setDeletionTimestampMutex.Lock() + defer fake.setDeletionTimestampMutex.Unlock() + fake.SetDeletionTimestampStub = stub +} + +func (fake *EnrollInstance) SetDeletionTimestampArgsForCall(i int) *v1.Time { + fake.setDeletionTimestampMutex.RLock() + defer fake.setDeletionTimestampMutex.RUnlock() + argsForCall := fake.setDeletionTimestampArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *EnrollInstance) SetFinalizers(arg1 []string) { + var arg1Copy []string + if arg1 != nil { + arg1Copy = make([]string, len(arg1)) + copy(arg1Copy, arg1) + } + fake.setFinalizersMutex.Lock() + fake.setFinalizersArgsForCall = append(fake.setFinalizersArgsForCall, struct { + arg1 []string + }{arg1Copy}) + stub := fake.SetFinalizersStub + fake.recordInvocation("SetFinalizers", []interface{}{arg1Copy}) + fake.setFinalizersMutex.Unlock() + if stub != nil { + fake.SetFinalizersStub(arg1) + } +} + +func (fake *EnrollInstance) SetFinalizersCallCount() int { + fake.setFinalizersMutex.RLock() + defer fake.setFinalizersMutex.RUnlock() + return len(fake.setFinalizersArgsForCall) +} + +func (fake *EnrollInstance) SetFinalizersCalls(stub func([]string)) { + fake.setFinalizersMutex.Lock() + defer fake.setFinalizersMutex.Unlock() + fake.SetFinalizersStub = stub +} + +func (fake *EnrollInstance) SetFinalizersArgsForCall(i int) []string { + fake.setFinalizersMutex.RLock() + defer fake.setFinalizersMutex.RUnlock() + argsForCall := fake.setFinalizersArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *EnrollInstance) SetGenerateName(arg1 string) { + fake.setGenerateNameMutex.Lock() + fake.setGenerateNameArgsForCall = append(fake.setGenerateNameArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetGenerateNameStub + fake.recordInvocation("SetGenerateName", []interface{}{arg1}) + fake.setGenerateNameMutex.Unlock() + if stub != nil { + fake.SetGenerateNameStub(arg1) + } +} + +func (fake *EnrollInstance) SetGenerateNameCallCount() int { + fake.setGenerateNameMutex.RLock() + defer fake.setGenerateNameMutex.RUnlock() + return len(fake.setGenerateNameArgsForCall) +} + +func (fake 
*EnrollInstance) SetGenerateNameCalls(stub func(string)) { + fake.setGenerateNameMutex.Lock() + defer fake.setGenerateNameMutex.Unlock() + fake.SetGenerateNameStub = stub +} + +func (fake *EnrollInstance) SetGenerateNameArgsForCall(i int) string { + fake.setGenerateNameMutex.RLock() + defer fake.setGenerateNameMutex.RUnlock() + argsForCall := fake.setGenerateNameArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *EnrollInstance) SetGeneration(arg1 int64) { + fake.setGenerationMutex.Lock() + fake.setGenerationArgsForCall = append(fake.setGenerationArgsForCall, struct { + arg1 int64 + }{arg1}) + stub := fake.SetGenerationStub + fake.recordInvocation("SetGeneration", []interface{}{arg1}) + fake.setGenerationMutex.Unlock() + if stub != nil { + fake.SetGenerationStub(arg1) + } +} + +func (fake *EnrollInstance) SetGenerationCallCount() int { + fake.setGenerationMutex.RLock() + defer fake.setGenerationMutex.RUnlock() + return len(fake.setGenerationArgsForCall) +} + +func (fake *EnrollInstance) SetGenerationCalls(stub func(int64)) { + fake.setGenerationMutex.Lock() + defer fake.setGenerationMutex.Unlock() + fake.SetGenerationStub = stub +} + +func (fake *EnrollInstance) SetGenerationArgsForCall(i int) int64 { + fake.setGenerationMutex.RLock() + defer fake.setGenerationMutex.RUnlock() + argsForCall := fake.setGenerationArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *EnrollInstance) SetLabels(arg1 map[string]string) { + fake.setLabelsMutex.Lock() + fake.setLabelsArgsForCall = append(fake.setLabelsArgsForCall, struct { + arg1 map[string]string + }{arg1}) + stub := fake.SetLabelsStub + fake.recordInvocation("SetLabels", []interface{}{arg1}) + fake.setLabelsMutex.Unlock() + if stub != nil { + fake.SetLabelsStub(arg1) + } +} + +func (fake *EnrollInstance) SetLabelsCallCount() int { + fake.setLabelsMutex.RLock() + defer fake.setLabelsMutex.RUnlock() + return len(fake.setLabelsArgsForCall) +} + +func (fake *EnrollInstance) SetLabelsCalls(stub func(map[string]string)) { + fake.setLabelsMutex.Lock() + defer fake.setLabelsMutex.Unlock() + fake.SetLabelsStub = stub +} + +func (fake *EnrollInstance) SetLabelsArgsForCall(i int) map[string]string { + fake.setLabelsMutex.RLock() + defer fake.setLabelsMutex.RUnlock() + argsForCall := fake.setLabelsArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *EnrollInstance) SetManagedFields(arg1 []v1.ManagedFieldsEntry) { + var arg1Copy []v1.ManagedFieldsEntry + if arg1 != nil { + arg1Copy = make([]v1.ManagedFieldsEntry, len(arg1)) + copy(arg1Copy, arg1) + } + fake.setManagedFieldsMutex.Lock() + fake.setManagedFieldsArgsForCall = append(fake.setManagedFieldsArgsForCall, struct { + arg1 []v1.ManagedFieldsEntry + }{arg1Copy}) + stub := fake.SetManagedFieldsStub + fake.recordInvocation("SetManagedFields", []interface{}{arg1Copy}) + fake.setManagedFieldsMutex.Unlock() + if stub != nil { + fake.SetManagedFieldsStub(arg1) + } +} + +func (fake *EnrollInstance) SetManagedFieldsCallCount() int { + fake.setManagedFieldsMutex.RLock() + defer fake.setManagedFieldsMutex.RUnlock() + return len(fake.setManagedFieldsArgsForCall) +} + +func (fake *EnrollInstance) SetManagedFieldsCalls(stub func([]v1.ManagedFieldsEntry)) { + fake.setManagedFieldsMutex.Lock() + defer fake.setManagedFieldsMutex.Unlock() + fake.SetManagedFieldsStub = stub +} + +func (fake *EnrollInstance) SetManagedFieldsArgsForCall(i int) []v1.ManagedFieldsEntry { + fake.setManagedFieldsMutex.RLock() + defer fake.setManagedFieldsMutex.RUnlock() + argsForCall := fake.setManagedFieldsArgsForCall[i] + 
return argsForCall.arg1 +} + +func (fake *EnrollInstance) SetName(arg1 string) { + fake.setNameMutex.Lock() + fake.setNameArgsForCall = append(fake.setNameArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetNameStub + fake.recordInvocation("SetName", []interface{}{arg1}) + fake.setNameMutex.Unlock() + if stub != nil { + fake.SetNameStub(arg1) + } +} + +func (fake *EnrollInstance) SetNameCallCount() int { + fake.setNameMutex.RLock() + defer fake.setNameMutex.RUnlock() + return len(fake.setNameArgsForCall) +} + +func (fake *EnrollInstance) SetNameCalls(stub func(string)) { + fake.setNameMutex.Lock() + defer fake.setNameMutex.Unlock() + fake.SetNameStub = stub +} + +func (fake *EnrollInstance) SetNameArgsForCall(i int) string { + fake.setNameMutex.RLock() + defer fake.setNameMutex.RUnlock() + argsForCall := fake.setNameArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *EnrollInstance) SetNamespace(arg1 string) { + fake.setNamespaceMutex.Lock() + fake.setNamespaceArgsForCall = append(fake.setNamespaceArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetNamespaceStub + fake.recordInvocation("SetNamespace", []interface{}{arg1}) + fake.setNamespaceMutex.Unlock() + if stub != nil { + fake.SetNamespaceStub(arg1) + } +} + +func (fake *EnrollInstance) SetNamespaceCallCount() int { + fake.setNamespaceMutex.RLock() + defer fake.setNamespaceMutex.RUnlock() + return len(fake.setNamespaceArgsForCall) +} + +func (fake *EnrollInstance) SetNamespaceCalls(stub func(string)) { + fake.setNamespaceMutex.Lock() + defer fake.setNamespaceMutex.Unlock() + fake.SetNamespaceStub = stub +} + +func (fake *EnrollInstance) SetNamespaceArgsForCall(i int) string { + fake.setNamespaceMutex.RLock() + defer fake.setNamespaceMutex.RUnlock() + argsForCall := fake.setNamespaceArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *EnrollInstance) SetOwnerReferences(arg1 []v1.OwnerReference) { + var arg1Copy []v1.OwnerReference + if arg1 != nil { + arg1Copy = make([]v1.OwnerReference, len(arg1)) + copy(arg1Copy, arg1) + } + fake.setOwnerReferencesMutex.Lock() + fake.setOwnerReferencesArgsForCall = append(fake.setOwnerReferencesArgsForCall, struct { + arg1 []v1.OwnerReference + }{arg1Copy}) + stub := fake.SetOwnerReferencesStub + fake.recordInvocation("SetOwnerReferences", []interface{}{arg1Copy}) + fake.setOwnerReferencesMutex.Unlock() + if stub != nil { + fake.SetOwnerReferencesStub(arg1) + } +} + +func (fake *EnrollInstance) SetOwnerReferencesCallCount() int { + fake.setOwnerReferencesMutex.RLock() + defer fake.setOwnerReferencesMutex.RUnlock() + return len(fake.setOwnerReferencesArgsForCall) +} + +func (fake *EnrollInstance) SetOwnerReferencesCalls(stub func([]v1.OwnerReference)) { + fake.setOwnerReferencesMutex.Lock() + defer fake.setOwnerReferencesMutex.Unlock() + fake.SetOwnerReferencesStub = stub +} + +func (fake *EnrollInstance) SetOwnerReferencesArgsForCall(i int) []v1.OwnerReference { + fake.setOwnerReferencesMutex.RLock() + defer fake.setOwnerReferencesMutex.RUnlock() + argsForCall := fake.setOwnerReferencesArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *EnrollInstance) SetResourceVersion(arg1 string) { + fake.setResourceVersionMutex.Lock() + fake.setResourceVersionArgsForCall = append(fake.setResourceVersionArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetResourceVersionStub + fake.recordInvocation("SetResourceVersion", []interface{}{arg1}) + fake.setResourceVersionMutex.Unlock() + if stub != nil { + fake.SetResourceVersionStub(arg1) + } +} + +func (fake 
*EnrollInstance) SetResourceVersionCallCount() int { + fake.setResourceVersionMutex.RLock() + defer fake.setResourceVersionMutex.RUnlock() + return len(fake.setResourceVersionArgsForCall) +} + +func (fake *EnrollInstance) SetResourceVersionCalls(stub func(string)) { + fake.setResourceVersionMutex.Lock() + defer fake.setResourceVersionMutex.Unlock() + fake.SetResourceVersionStub = stub +} + +func (fake *EnrollInstance) SetResourceVersionArgsForCall(i int) string { + fake.setResourceVersionMutex.RLock() + defer fake.setResourceVersionMutex.RUnlock() + argsForCall := fake.setResourceVersionArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *EnrollInstance) SetSelfLink(arg1 string) { + fake.setSelfLinkMutex.Lock() + fake.setSelfLinkArgsForCall = append(fake.setSelfLinkArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetSelfLinkStub + fake.recordInvocation("SetSelfLink", []interface{}{arg1}) + fake.setSelfLinkMutex.Unlock() + if stub != nil { + fake.SetSelfLinkStub(arg1) + } +} + +func (fake *EnrollInstance) SetSelfLinkCallCount() int { + fake.setSelfLinkMutex.RLock() + defer fake.setSelfLinkMutex.RUnlock() + return len(fake.setSelfLinkArgsForCall) +} + +func (fake *EnrollInstance) SetSelfLinkCalls(stub func(string)) { + fake.setSelfLinkMutex.Lock() + defer fake.setSelfLinkMutex.Unlock() + fake.SetSelfLinkStub = stub +} + +func (fake *EnrollInstance) SetSelfLinkArgsForCall(i int) string { + fake.setSelfLinkMutex.RLock() + defer fake.setSelfLinkMutex.RUnlock() + argsForCall := fake.setSelfLinkArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *EnrollInstance) SetUID(arg1 types.UID) { + fake.setUIDMutex.Lock() + fake.setUIDArgsForCall = append(fake.setUIDArgsForCall, struct { + arg1 types.UID + }{arg1}) + stub := fake.SetUIDStub + fake.recordInvocation("SetUID", []interface{}{arg1}) + fake.setUIDMutex.Unlock() + if stub != nil { + fake.SetUIDStub(arg1) + } +} + +func (fake *EnrollInstance) SetUIDCallCount() int { + fake.setUIDMutex.RLock() + defer fake.setUIDMutex.RUnlock() + return len(fake.setUIDArgsForCall) +} + +func (fake *EnrollInstance) SetUIDCalls(stub func(types.UID)) { + fake.setUIDMutex.Lock() + defer fake.setUIDMutex.Unlock() + fake.SetUIDStub = stub +} + +func (fake *EnrollInstance) SetUIDArgsForCall(i int) types.UID { + fake.setUIDMutex.RLock() + defer fake.setUIDMutex.RUnlock() + argsForCall := fake.setUIDArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *EnrollInstance) UsingHSMProxy() bool { + fake.usingHSMProxyMutex.Lock() + ret, specificReturn := fake.usingHSMProxyReturnsOnCall[len(fake.usingHSMProxyArgsForCall)] + fake.usingHSMProxyArgsForCall = append(fake.usingHSMProxyArgsForCall, struct { + }{}) + stub := fake.UsingHSMProxyStub + fakeReturns := fake.usingHSMProxyReturns + fake.recordInvocation("UsingHSMProxy", []interface{}{}) + fake.usingHSMProxyMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *EnrollInstance) UsingHSMProxyCallCount() int { + fake.usingHSMProxyMutex.RLock() + defer fake.usingHSMProxyMutex.RUnlock() + return len(fake.usingHSMProxyArgsForCall) +} + +func (fake *EnrollInstance) UsingHSMProxyCalls(stub func() bool) { + fake.usingHSMProxyMutex.Lock() + defer fake.usingHSMProxyMutex.Unlock() + fake.UsingHSMProxyStub = stub +} + +func (fake *EnrollInstance) UsingHSMProxyReturns(result1 bool) { + fake.usingHSMProxyMutex.Lock() + defer fake.usingHSMProxyMutex.Unlock() + fake.UsingHSMProxyStub = nil + fake.usingHSMProxyReturns = struct 
{ + result1 bool + }{result1} +} + +func (fake *EnrollInstance) UsingHSMProxyReturnsOnCall(i int, result1 bool) { + fake.usingHSMProxyMutex.Lock() + defer fake.usingHSMProxyMutex.Unlock() + fake.UsingHSMProxyStub = nil + if fake.usingHSMProxyReturnsOnCall == nil { + fake.usingHSMProxyReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.usingHSMProxyReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *EnrollInstance) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.deepCopyObjectMutex.RLock() + defer fake.deepCopyObjectMutex.RUnlock() + fake.enrollerImageMutex.RLock() + defer fake.enrollerImageMutex.RUnlock() + fake.getAnnotationsMutex.RLock() + defer fake.getAnnotationsMutex.RUnlock() + fake.getClusterNameMutex.RLock() + defer fake.getClusterNameMutex.RUnlock() + fake.getConfigOverrideMutex.RLock() + defer fake.getConfigOverrideMutex.RUnlock() + fake.getCreationTimestampMutex.RLock() + defer fake.getCreationTimestampMutex.RUnlock() + fake.getDeletionGracePeriodSecondsMutex.RLock() + defer fake.getDeletionGracePeriodSecondsMutex.RUnlock() + fake.getDeletionTimestampMutex.RLock() + defer fake.getDeletionTimestampMutex.RUnlock() + fake.getFinalizersMutex.RLock() + defer fake.getFinalizersMutex.RUnlock() + fake.getGenerateNameMutex.RLock() + defer fake.getGenerateNameMutex.RUnlock() + fake.getGenerationMutex.RLock() + defer fake.getGenerationMutex.RUnlock() + fake.getLabelsMutex.RLock() + defer fake.getLabelsMutex.RUnlock() + fake.getManagedFieldsMutex.RLock() + defer fake.getManagedFieldsMutex.RUnlock() + fake.getNameMutex.RLock() + defer fake.getNameMutex.RUnlock() + fake.getNamespaceMutex.RLock() + defer fake.getNamespaceMutex.RUnlock() + fake.getObjectKindMutex.RLock() + defer fake.getObjectKindMutex.RUnlock() + fake.getOwnerReferencesMutex.RLock() + defer fake.getOwnerReferencesMutex.RUnlock() + fake.getPullSecretsMutex.RLock() + defer fake.getPullSecretsMutex.RUnlock() + fake.getResourceMutex.RLock() + defer fake.getResourceMutex.RUnlock() + fake.getResourceVersionMutex.RLock() + defer fake.getResourceVersionMutex.RUnlock() + fake.getSelfLinkMutex.RLock() + defer fake.getSelfLinkMutex.RUnlock() + fake.getUIDMutex.RLock() + defer fake.getUIDMutex.RUnlock() + fake.isHSMEnabledMutex.RLock() + defer fake.isHSMEnabledMutex.RUnlock() + fake.pVCNameMutex.RLock() + defer fake.pVCNameMutex.RUnlock() + fake.setAnnotationsMutex.RLock() + defer fake.setAnnotationsMutex.RUnlock() + fake.setClusterNameMutex.RLock() + defer fake.setClusterNameMutex.RUnlock() + fake.setCreationTimestampMutex.RLock() + defer fake.setCreationTimestampMutex.RUnlock() + fake.setDeletionGracePeriodSecondsMutex.RLock() + defer fake.setDeletionGracePeriodSecondsMutex.RUnlock() + fake.setDeletionTimestampMutex.RLock() + defer fake.setDeletionTimestampMutex.RUnlock() + fake.setFinalizersMutex.RLock() + defer fake.setFinalizersMutex.RUnlock() + fake.setGenerateNameMutex.RLock() + defer fake.setGenerateNameMutex.RUnlock() + fake.setGenerationMutex.RLock() + defer fake.setGenerationMutex.RUnlock() + fake.setLabelsMutex.RLock() + defer fake.setLabelsMutex.RUnlock() + fake.setManagedFieldsMutex.RLock() + defer fake.setManagedFieldsMutex.RUnlock() + fake.setNameMutex.RLock() + defer fake.setNameMutex.RUnlock() + fake.setNamespaceMutex.RLock() + defer fake.setNamespaceMutex.RUnlock() + fake.setOwnerReferencesMutex.RLock() + defer fake.setOwnerReferencesMutex.RUnlock() + fake.setResourceVersionMutex.RLock() + defer 
fake.setResourceVersionMutex.RUnlock() + fake.setSelfLinkMutex.RLock() + defer fake.setSelfLinkMutex.RUnlock() + fake.setUIDMutex.RLock() + defer fake.setUIDMutex.RUnlock() + fake.usingHSMProxyMutex.RLock() + defer fake.usingHSMProxyMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *EnrollInstance) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ action.EnrollInstance = new(EnrollInstance) diff --git a/pkg/action/mocks/reenroller.go b/pkg/action/mocks/reenroller.go new file mode 100644 index 00000000..0f2d845c --- /dev/null +++ b/pkg/action/mocks/reenroller.go @@ -0,0 +1,117 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package mocks + +import ( + "sync" + + "github.com/IBM-Blockchain/fabric-operator/pkg/action" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common" + "k8s.io/apimachinery/pkg/runtime" +) + +type Reenroller struct { + RenewCertStub func(common.SecretType, runtime.Object, bool) error + renewCertMutex sync.RWMutex + renewCertArgsForCall []struct { + arg1 common.SecretType + arg2 runtime.Object + arg3 bool + } + renewCertReturns struct { + result1 error + } + renewCertReturnsOnCall map[int]struct { + result1 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *Reenroller) RenewCert(arg1 common.SecretType, arg2 runtime.Object, arg3 bool) error { + fake.renewCertMutex.Lock() + ret, specificReturn := fake.renewCertReturnsOnCall[len(fake.renewCertArgsForCall)] + fake.renewCertArgsForCall = append(fake.renewCertArgsForCall, struct { + arg1 common.SecretType + arg2 runtime.Object + arg3 bool + }{arg1, arg2, arg3}) + stub := fake.RenewCertStub + fakeReturns := fake.renewCertReturns + fake.recordInvocation("RenewCert", []interface{}{arg1, arg2, arg3}) + fake.renewCertMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Reenroller) RenewCertCallCount() int { + fake.renewCertMutex.RLock() + defer fake.renewCertMutex.RUnlock() + return len(fake.renewCertArgsForCall) +} + +func (fake *Reenroller) RenewCertCalls(stub func(common.SecretType, runtime.Object, bool) error) { + fake.renewCertMutex.Lock() + defer fake.renewCertMutex.Unlock() + fake.RenewCertStub = stub +} + +func (fake *Reenroller) RenewCertArgsForCall(i int) (common.SecretType, runtime.Object, bool) { + fake.renewCertMutex.RLock() + defer fake.renewCertMutex.RUnlock() + argsForCall := fake.renewCertArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Reenroller) RenewCertReturns(result1 error) { + fake.renewCertMutex.Lock() + defer fake.renewCertMutex.Unlock() + fake.RenewCertStub = nil + fake.renewCertReturns = struct { + result1 error + }{result1} +} + +func (fake *Reenroller) RenewCertReturnsOnCall(i int, result1 error) { + fake.renewCertMutex.Lock() + defer fake.renewCertMutex.Unlock() + fake.RenewCertStub = nil + if fake.renewCertReturnsOnCall == nil { + fake.renewCertReturnsOnCall = make(map[int]struct { + result1 error + }) + } + 
fake.renewCertReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Reenroller) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.renewCertMutex.RLock() + defer fake.renewCertMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *Reenroller) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ action.Reenroller = new(Reenroller) diff --git a/pkg/action/mocks/reenrollinstance.go b/pkg/action/mocks/reenrollinstance.go new file mode 100644 index 00000000..6e9e1da8 --- /dev/null +++ b/pkg/action/mocks/reenrollinstance.go @@ -0,0 +1,1910 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package mocks + +import ( + "sync" + + "github.com/IBM-Blockchain/fabric-operator/pkg/action" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" +) + +type ReenrollInstance struct { + DeepCopyObjectStub func() runtime.Object + deepCopyObjectMutex sync.RWMutex + deepCopyObjectArgsForCall []struct { + } + deepCopyObjectReturns struct { + result1 runtime.Object + } + deepCopyObjectReturnsOnCall map[int]struct { + result1 runtime.Object + } + GetAnnotationsStub func() map[string]string + getAnnotationsMutex sync.RWMutex + getAnnotationsArgsForCall []struct { + } + getAnnotationsReturns struct { + result1 map[string]string + } + getAnnotationsReturnsOnCall map[int]struct { + result1 map[string]string + } + GetClusterNameStub func() string + getClusterNameMutex sync.RWMutex + getClusterNameArgsForCall []struct { + } + getClusterNameReturns struct { + result1 string + } + getClusterNameReturnsOnCall map[int]struct { + result1 string + } + GetCreationTimestampStub func() v1.Time + getCreationTimestampMutex sync.RWMutex + getCreationTimestampArgsForCall []struct { + } + getCreationTimestampReturns struct { + result1 v1.Time + } + getCreationTimestampReturnsOnCall map[int]struct { + result1 v1.Time + } + GetDeletionGracePeriodSecondsStub func() *int64 + getDeletionGracePeriodSecondsMutex sync.RWMutex + getDeletionGracePeriodSecondsArgsForCall []struct { + } + getDeletionGracePeriodSecondsReturns struct { + result1 *int64 + } + getDeletionGracePeriodSecondsReturnsOnCall map[int]struct { + result1 *int64 + } + GetDeletionTimestampStub func() *v1.Time + getDeletionTimestampMutex sync.RWMutex + getDeletionTimestampArgsForCall []struct { + } + getDeletionTimestampReturns struct { + result1 *v1.Time + } + getDeletionTimestampReturnsOnCall map[int]struct { + result1 *v1.Time + } + GetFinalizersStub func() []string + getFinalizersMutex sync.RWMutex + getFinalizersArgsForCall []struct { + } + getFinalizersReturns struct { + result1 []string + } + getFinalizersReturnsOnCall map[int]struct { + result1 []string + } + GetGenerateNameStub func() string + getGenerateNameMutex sync.RWMutex + getGenerateNameArgsForCall []struct { + } + getGenerateNameReturns struct { + result1 string + } + getGenerateNameReturnsOnCall map[int]struct { + result1 string + } + GetGenerationStub func() int64 + 
getGenerationMutex sync.RWMutex + getGenerationArgsForCall []struct { + } + getGenerationReturns struct { + result1 int64 + } + getGenerationReturnsOnCall map[int]struct { + result1 int64 + } + GetLabelsStub func() map[string]string + getLabelsMutex sync.RWMutex + getLabelsArgsForCall []struct { + } + getLabelsReturns struct { + result1 map[string]string + } + getLabelsReturnsOnCall map[int]struct { + result1 map[string]string + } + GetManagedFieldsStub func() []v1.ManagedFieldsEntry + getManagedFieldsMutex sync.RWMutex + getManagedFieldsArgsForCall []struct { + } + getManagedFieldsReturns struct { + result1 []v1.ManagedFieldsEntry + } + getManagedFieldsReturnsOnCall map[int]struct { + result1 []v1.ManagedFieldsEntry + } + GetNameStub func() string + getNameMutex sync.RWMutex + getNameArgsForCall []struct { + } + getNameReturns struct { + result1 string + } + getNameReturnsOnCall map[int]struct { + result1 string + } + GetNamespaceStub func() string + getNamespaceMutex sync.RWMutex + getNamespaceArgsForCall []struct { + } + getNamespaceReturns struct { + result1 string + } + getNamespaceReturnsOnCall map[int]struct { + result1 string + } + GetObjectKindStub func() schema.ObjectKind + getObjectKindMutex sync.RWMutex + getObjectKindArgsForCall []struct { + } + getObjectKindReturns struct { + result1 schema.ObjectKind + } + getObjectKindReturnsOnCall map[int]struct { + result1 schema.ObjectKind + } + GetOwnerReferencesStub func() []v1.OwnerReference + getOwnerReferencesMutex sync.RWMutex + getOwnerReferencesArgsForCall []struct { + } + getOwnerReferencesReturns struct { + result1 []v1.OwnerReference + } + getOwnerReferencesReturnsOnCall map[int]struct { + result1 []v1.OwnerReference + } + GetResourceVersionStub func() string + getResourceVersionMutex sync.RWMutex + getResourceVersionArgsForCall []struct { + } + getResourceVersionReturns struct { + result1 string + } + getResourceVersionReturnsOnCall map[int]struct { + result1 string + } + GetSelfLinkStub func() string + getSelfLinkMutex sync.RWMutex + getSelfLinkArgsForCall []struct { + } + getSelfLinkReturns struct { + result1 string + } + getSelfLinkReturnsOnCall map[int]struct { + result1 string + } + GetUIDStub func() types.UID + getUIDMutex sync.RWMutex + getUIDArgsForCall []struct { + } + getUIDReturns struct { + result1 types.UID + } + getUIDReturnsOnCall map[int]struct { + result1 types.UID + } + ResetEcertReenrollStub func() + resetEcertReenrollMutex sync.RWMutex + resetEcertReenrollArgsForCall []struct { + } + ResetTLSReenrollStub func() + resetTLSReenrollMutex sync.RWMutex + resetTLSReenrollArgsForCall []struct { + } + SetAnnotationsStub func(map[string]string) + setAnnotationsMutex sync.RWMutex + setAnnotationsArgsForCall []struct { + arg1 map[string]string + } + SetClusterNameStub func(string) + setClusterNameMutex sync.RWMutex + setClusterNameArgsForCall []struct { + arg1 string + } + SetCreationTimestampStub func(v1.Time) + setCreationTimestampMutex sync.RWMutex + setCreationTimestampArgsForCall []struct { + arg1 v1.Time + } + SetDeletionGracePeriodSecondsStub func(*int64) + setDeletionGracePeriodSecondsMutex sync.RWMutex + setDeletionGracePeriodSecondsArgsForCall []struct { + arg1 *int64 + } + SetDeletionTimestampStub func(*v1.Time) + setDeletionTimestampMutex sync.RWMutex + setDeletionTimestampArgsForCall []struct { + arg1 *v1.Time + } + SetFinalizersStub func([]string) + setFinalizersMutex sync.RWMutex + setFinalizersArgsForCall []struct { + arg1 []string + } + SetGenerateNameStub func(string) + setGenerateNameMutex 
sync.RWMutex + setGenerateNameArgsForCall []struct { + arg1 string + } + SetGenerationStub func(int64) + setGenerationMutex sync.RWMutex + setGenerationArgsForCall []struct { + arg1 int64 + } + SetLabelsStub func(map[string]string) + setLabelsMutex sync.RWMutex + setLabelsArgsForCall []struct { + arg1 map[string]string + } + SetManagedFieldsStub func([]v1.ManagedFieldsEntry) + setManagedFieldsMutex sync.RWMutex + setManagedFieldsArgsForCall []struct { + arg1 []v1.ManagedFieldsEntry + } + SetNameStub func(string) + setNameMutex sync.RWMutex + setNameArgsForCall []struct { + arg1 string + } + SetNamespaceStub func(string) + setNamespaceMutex sync.RWMutex + setNamespaceArgsForCall []struct { + arg1 string + } + SetOwnerReferencesStub func([]v1.OwnerReference) + setOwnerReferencesMutex sync.RWMutex + setOwnerReferencesArgsForCall []struct { + arg1 []v1.OwnerReference + } + SetResourceVersionStub func(string) + setResourceVersionMutex sync.RWMutex + setResourceVersionArgsForCall []struct { + arg1 string + } + SetSelfLinkStub func(string) + setSelfLinkMutex sync.RWMutex + setSelfLinkArgsForCall []struct { + arg1 string + } + SetUIDStub func(types.UID) + setUIDMutex sync.RWMutex + setUIDArgsForCall []struct { + arg1 types.UID + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *ReenrollInstance) DeepCopyObject() runtime.Object { + fake.deepCopyObjectMutex.Lock() + ret, specificReturn := fake.deepCopyObjectReturnsOnCall[len(fake.deepCopyObjectArgsForCall)] + fake.deepCopyObjectArgsForCall = append(fake.deepCopyObjectArgsForCall, struct { + }{}) + stub := fake.DeepCopyObjectStub + fakeReturns := fake.deepCopyObjectReturns + fake.recordInvocation("DeepCopyObject", []interface{}{}) + fake.deepCopyObjectMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ReenrollInstance) DeepCopyObjectCallCount() int { + fake.deepCopyObjectMutex.RLock() + defer fake.deepCopyObjectMutex.RUnlock() + return len(fake.deepCopyObjectArgsForCall) +} + +func (fake *ReenrollInstance) DeepCopyObjectCalls(stub func() runtime.Object) { + fake.deepCopyObjectMutex.Lock() + defer fake.deepCopyObjectMutex.Unlock() + fake.DeepCopyObjectStub = stub +} + +func (fake *ReenrollInstance) DeepCopyObjectReturns(result1 runtime.Object) { + fake.deepCopyObjectMutex.Lock() + defer fake.deepCopyObjectMutex.Unlock() + fake.DeepCopyObjectStub = nil + fake.deepCopyObjectReturns = struct { + result1 runtime.Object + }{result1} +} + +func (fake *ReenrollInstance) DeepCopyObjectReturnsOnCall(i int, result1 runtime.Object) { + fake.deepCopyObjectMutex.Lock() + defer fake.deepCopyObjectMutex.Unlock() + fake.DeepCopyObjectStub = nil + if fake.deepCopyObjectReturnsOnCall == nil { + fake.deepCopyObjectReturnsOnCall = make(map[int]struct { + result1 runtime.Object + }) + } + fake.deepCopyObjectReturnsOnCall[i] = struct { + result1 runtime.Object + }{result1} +} + +func (fake *ReenrollInstance) GetAnnotations() map[string]string { + fake.getAnnotationsMutex.Lock() + ret, specificReturn := fake.getAnnotationsReturnsOnCall[len(fake.getAnnotationsArgsForCall)] + fake.getAnnotationsArgsForCall = append(fake.getAnnotationsArgsForCall, struct { + }{}) + stub := fake.GetAnnotationsStub + fakeReturns := fake.getAnnotationsReturns + fake.recordInvocation("GetAnnotations", []interface{}{}) + fake.getAnnotationsMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return 
fakeReturns.result1 +} + +func (fake *ReenrollInstance) GetAnnotationsCallCount() int { + fake.getAnnotationsMutex.RLock() + defer fake.getAnnotationsMutex.RUnlock() + return len(fake.getAnnotationsArgsForCall) +} + +func (fake *ReenrollInstance) GetAnnotationsCalls(stub func() map[string]string) { + fake.getAnnotationsMutex.Lock() + defer fake.getAnnotationsMutex.Unlock() + fake.GetAnnotationsStub = stub +} + +func (fake *ReenrollInstance) GetAnnotationsReturns(result1 map[string]string) { + fake.getAnnotationsMutex.Lock() + defer fake.getAnnotationsMutex.Unlock() + fake.GetAnnotationsStub = nil + fake.getAnnotationsReturns = struct { + result1 map[string]string + }{result1} +} + +func (fake *ReenrollInstance) GetAnnotationsReturnsOnCall(i int, result1 map[string]string) { + fake.getAnnotationsMutex.Lock() + defer fake.getAnnotationsMutex.Unlock() + fake.GetAnnotationsStub = nil + if fake.getAnnotationsReturnsOnCall == nil { + fake.getAnnotationsReturnsOnCall = make(map[int]struct { + result1 map[string]string + }) + } + fake.getAnnotationsReturnsOnCall[i] = struct { + result1 map[string]string + }{result1} +} + +func (fake *ReenrollInstance) GetClusterName() string { + fake.getClusterNameMutex.Lock() + ret, specificReturn := fake.getClusterNameReturnsOnCall[len(fake.getClusterNameArgsForCall)] + fake.getClusterNameArgsForCall = append(fake.getClusterNameArgsForCall, struct { + }{}) + stub := fake.GetClusterNameStub + fakeReturns := fake.getClusterNameReturns + fake.recordInvocation("GetClusterName", []interface{}{}) + fake.getClusterNameMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ReenrollInstance) GetClusterNameCallCount() int { + fake.getClusterNameMutex.RLock() + defer fake.getClusterNameMutex.RUnlock() + return len(fake.getClusterNameArgsForCall) +} + +func (fake *ReenrollInstance) GetClusterNameCalls(stub func() string) { + fake.getClusterNameMutex.Lock() + defer fake.getClusterNameMutex.Unlock() + fake.GetClusterNameStub = stub +} + +func (fake *ReenrollInstance) GetClusterNameReturns(result1 string) { + fake.getClusterNameMutex.Lock() + defer fake.getClusterNameMutex.Unlock() + fake.GetClusterNameStub = nil + fake.getClusterNameReturns = struct { + result1 string + }{result1} +} + +func (fake *ReenrollInstance) GetClusterNameReturnsOnCall(i int, result1 string) { + fake.getClusterNameMutex.Lock() + defer fake.getClusterNameMutex.Unlock() + fake.GetClusterNameStub = nil + if fake.getClusterNameReturnsOnCall == nil { + fake.getClusterNameReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getClusterNameReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *ReenrollInstance) GetCreationTimestamp() v1.Time { + fake.getCreationTimestampMutex.Lock() + ret, specificReturn := fake.getCreationTimestampReturnsOnCall[len(fake.getCreationTimestampArgsForCall)] + fake.getCreationTimestampArgsForCall = append(fake.getCreationTimestampArgsForCall, struct { + }{}) + stub := fake.GetCreationTimestampStub + fakeReturns := fake.getCreationTimestampReturns + fake.recordInvocation("GetCreationTimestamp", []interface{}{}) + fake.getCreationTimestampMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ReenrollInstance) GetCreationTimestampCallCount() int { + fake.getCreationTimestampMutex.RLock() + defer fake.getCreationTimestampMutex.RUnlock() + return 
len(fake.getCreationTimestampArgsForCall) +} + +func (fake *ReenrollInstance) GetCreationTimestampCalls(stub func() v1.Time) { + fake.getCreationTimestampMutex.Lock() + defer fake.getCreationTimestampMutex.Unlock() + fake.GetCreationTimestampStub = stub +} + +func (fake *ReenrollInstance) GetCreationTimestampReturns(result1 v1.Time) { + fake.getCreationTimestampMutex.Lock() + defer fake.getCreationTimestampMutex.Unlock() + fake.GetCreationTimestampStub = nil + fake.getCreationTimestampReturns = struct { + result1 v1.Time + }{result1} +} + +func (fake *ReenrollInstance) GetCreationTimestampReturnsOnCall(i int, result1 v1.Time) { + fake.getCreationTimestampMutex.Lock() + defer fake.getCreationTimestampMutex.Unlock() + fake.GetCreationTimestampStub = nil + if fake.getCreationTimestampReturnsOnCall == nil { + fake.getCreationTimestampReturnsOnCall = make(map[int]struct { + result1 v1.Time + }) + } + fake.getCreationTimestampReturnsOnCall[i] = struct { + result1 v1.Time + }{result1} +} + +func (fake *ReenrollInstance) GetDeletionGracePeriodSeconds() *int64 { + fake.getDeletionGracePeriodSecondsMutex.Lock() + ret, specificReturn := fake.getDeletionGracePeriodSecondsReturnsOnCall[len(fake.getDeletionGracePeriodSecondsArgsForCall)] + fake.getDeletionGracePeriodSecondsArgsForCall = append(fake.getDeletionGracePeriodSecondsArgsForCall, struct { + }{}) + stub := fake.GetDeletionGracePeriodSecondsStub + fakeReturns := fake.getDeletionGracePeriodSecondsReturns + fake.recordInvocation("GetDeletionGracePeriodSeconds", []interface{}{}) + fake.getDeletionGracePeriodSecondsMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ReenrollInstance) GetDeletionGracePeriodSecondsCallCount() int { + fake.getDeletionGracePeriodSecondsMutex.RLock() + defer fake.getDeletionGracePeriodSecondsMutex.RUnlock() + return len(fake.getDeletionGracePeriodSecondsArgsForCall) +} + +func (fake *ReenrollInstance) GetDeletionGracePeriodSecondsCalls(stub func() *int64) { + fake.getDeletionGracePeriodSecondsMutex.Lock() + defer fake.getDeletionGracePeriodSecondsMutex.Unlock() + fake.GetDeletionGracePeriodSecondsStub = stub +} + +func (fake *ReenrollInstance) GetDeletionGracePeriodSecondsReturns(result1 *int64) { + fake.getDeletionGracePeriodSecondsMutex.Lock() + defer fake.getDeletionGracePeriodSecondsMutex.Unlock() + fake.GetDeletionGracePeriodSecondsStub = nil + fake.getDeletionGracePeriodSecondsReturns = struct { + result1 *int64 + }{result1} +} + +func (fake *ReenrollInstance) GetDeletionGracePeriodSecondsReturnsOnCall(i int, result1 *int64) { + fake.getDeletionGracePeriodSecondsMutex.Lock() + defer fake.getDeletionGracePeriodSecondsMutex.Unlock() + fake.GetDeletionGracePeriodSecondsStub = nil + if fake.getDeletionGracePeriodSecondsReturnsOnCall == nil { + fake.getDeletionGracePeriodSecondsReturnsOnCall = make(map[int]struct { + result1 *int64 + }) + } + fake.getDeletionGracePeriodSecondsReturnsOnCall[i] = struct { + result1 *int64 + }{result1} +} + +func (fake *ReenrollInstance) GetDeletionTimestamp() *v1.Time { + fake.getDeletionTimestampMutex.Lock() + ret, specificReturn := fake.getDeletionTimestampReturnsOnCall[len(fake.getDeletionTimestampArgsForCall)] + fake.getDeletionTimestampArgsForCall = append(fake.getDeletionTimestampArgsForCall, struct { + }{}) + stub := fake.GetDeletionTimestampStub + fakeReturns := fake.getDeletionTimestampReturns + fake.recordInvocation("GetDeletionTimestamp", []interface{}{}) + 
fake.getDeletionTimestampMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ReenrollInstance) GetDeletionTimestampCallCount() int { + fake.getDeletionTimestampMutex.RLock() + defer fake.getDeletionTimestampMutex.RUnlock() + return len(fake.getDeletionTimestampArgsForCall) +} + +func (fake *ReenrollInstance) GetDeletionTimestampCalls(stub func() *v1.Time) { + fake.getDeletionTimestampMutex.Lock() + defer fake.getDeletionTimestampMutex.Unlock() + fake.GetDeletionTimestampStub = stub +} + +func (fake *ReenrollInstance) GetDeletionTimestampReturns(result1 *v1.Time) { + fake.getDeletionTimestampMutex.Lock() + defer fake.getDeletionTimestampMutex.Unlock() + fake.GetDeletionTimestampStub = nil + fake.getDeletionTimestampReturns = struct { + result1 *v1.Time + }{result1} +} + +func (fake *ReenrollInstance) GetDeletionTimestampReturnsOnCall(i int, result1 *v1.Time) { + fake.getDeletionTimestampMutex.Lock() + defer fake.getDeletionTimestampMutex.Unlock() + fake.GetDeletionTimestampStub = nil + if fake.getDeletionTimestampReturnsOnCall == nil { + fake.getDeletionTimestampReturnsOnCall = make(map[int]struct { + result1 *v1.Time + }) + } + fake.getDeletionTimestampReturnsOnCall[i] = struct { + result1 *v1.Time + }{result1} +} + +func (fake *ReenrollInstance) GetFinalizers() []string { + fake.getFinalizersMutex.Lock() + ret, specificReturn := fake.getFinalizersReturnsOnCall[len(fake.getFinalizersArgsForCall)] + fake.getFinalizersArgsForCall = append(fake.getFinalizersArgsForCall, struct { + }{}) + stub := fake.GetFinalizersStub + fakeReturns := fake.getFinalizersReturns + fake.recordInvocation("GetFinalizers", []interface{}{}) + fake.getFinalizersMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ReenrollInstance) GetFinalizersCallCount() int { + fake.getFinalizersMutex.RLock() + defer fake.getFinalizersMutex.RUnlock() + return len(fake.getFinalizersArgsForCall) +} + +func (fake *ReenrollInstance) GetFinalizersCalls(stub func() []string) { + fake.getFinalizersMutex.Lock() + defer fake.getFinalizersMutex.Unlock() + fake.GetFinalizersStub = stub +} + +func (fake *ReenrollInstance) GetFinalizersReturns(result1 []string) { + fake.getFinalizersMutex.Lock() + defer fake.getFinalizersMutex.Unlock() + fake.GetFinalizersStub = nil + fake.getFinalizersReturns = struct { + result1 []string + }{result1} +} + +func (fake *ReenrollInstance) GetFinalizersReturnsOnCall(i int, result1 []string) { + fake.getFinalizersMutex.Lock() + defer fake.getFinalizersMutex.Unlock() + fake.GetFinalizersStub = nil + if fake.getFinalizersReturnsOnCall == nil { + fake.getFinalizersReturnsOnCall = make(map[int]struct { + result1 []string + }) + } + fake.getFinalizersReturnsOnCall[i] = struct { + result1 []string + }{result1} +} + +func (fake *ReenrollInstance) GetGenerateName() string { + fake.getGenerateNameMutex.Lock() + ret, specificReturn := fake.getGenerateNameReturnsOnCall[len(fake.getGenerateNameArgsForCall)] + fake.getGenerateNameArgsForCall = append(fake.getGenerateNameArgsForCall, struct { + }{}) + stub := fake.GetGenerateNameStub + fakeReturns := fake.getGenerateNameReturns + fake.recordInvocation("GetGenerateName", []interface{}{}) + fake.getGenerateNameMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ReenrollInstance) 
GetGenerateNameCallCount() int { + fake.getGenerateNameMutex.RLock() + defer fake.getGenerateNameMutex.RUnlock() + return len(fake.getGenerateNameArgsForCall) +} + +func (fake *ReenrollInstance) GetGenerateNameCalls(stub func() string) { + fake.getGenerateNameMutex.Lock() + defer fake.getGenerateNameMutex.Unlock() + fake.GetGenerateNameStub = stub +} + +func (fake *ReenrollInstance) GetGenerateNameReturns(result1 string) { + fake.getGenerateNameMutex.Lock() + defer fake.getGenerateNameMutex.Unlock() + fake.GetGenerateNameStub = nil + fake.getGenerateNameReturns = struct { + result1 string + }{result1} +} + +func (fake *ReenrollInstance) GetGenerateNameReturnsOnCall(i int, result1 string) { + fake.getGenerateNameMutex.Lock() + defer fake.getGenerateNameMutex.Unlock() + fake.GetGenerateNameStub = nil + if fake.getGenerateNameReturnsOnCall == nil { + fake.getGenerateNameReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getGenerateNameReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *ReenrollInstance) GetGeneration() int64 { + fake.getGenerationMutex.Lock() + ret, specificReturn := fake.getGenerationReturnsOnCall[len(fake.getGenerationArgsForCall)] + fake.getGenerationArgsForCall = append(fake.getGenerationArgsForCall, struct { + }{}) + stub := fake.GetGenerationStub + fakeReturns := fake.getGenerationReturns + fake.recordInvocation("GetGeneration", []interface{}{}) + fake.getGenerationMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ReenrollInstance) GetGenerationCallCount() int { + fake.getGenerationMutex.RLock() + defer fake.getGenerationMutex.RUnlock() + return len(fake.getGenerationArgsForCall) +} + +func (fake *ReenrollInstance) GetGenerationCalls(stub func() int64) { + fake.getGenerationMutex.Lock() + defer fake.getGenerationMutex.Unlock() + fake.GetGenerationStub = stub +} + +func (fake *ReenrollInstance) GetGenerationReturns(result1 int64) { + fake.getGenerationMutex.Lock() + defer fake.getGenerationMutex.Unlock() + fake.GetGenerationStub = nil + fake.getGenerationReturns = struct { + result1 int64 + }{result1} +} + +func (fake *ReenrollInstance) GetGenerationReturnsOnCall(i int, result1 int64) { + fake.getGenerationMutex.Lock() + defer fake.getGenerationMutex.Unlock() + fake.GetGenerationStub = nil + if fake.getGenerationReturnsOnCall == nil { + fake.getGenerationReturnsOnCall = make(map[int]struct { + result1 int64 + }) + } + fake.getGenerationReturnsOnCall[i] = struct { + result1 int64 + }{result1} +} + +func (fake *ReenrollInstance) GetLabels() map[string]string { + fake.getLabelsMutex.Lock() + ret, specificReturn := fake.getLabelsReturnsOnCall[len(fake.getLabelsArgsForCall)] + fake.getLabelsArgsForCall = append(fake.getLabelsArgsForCall, struct { + }{}) + stub := fake.GetLabelsStub + fakeReturns := fake.getLabelsReturns + fake.recordInvocation("GetLabels", []interface{}{}) + fake.getLabelsMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ReenrollInstance) GetLabelsCallCount() int { + fake.getLabelsMutex.RLock() + defer fake.getLabelsMutex.RUnlock() + return len(fake.getLabelsArgsForCall) +} + +func (fake *ReenrollInstance) GetLabelsCalls(stub func() map[string]string) { + fake.getLabelsMutex.Lock() + defer fake.getLabelsMutex.Unlock() + fake.GetLabelsStub = stub +} + +func (fake *ReenrollInstance) GetLabelsReturns(result1 map[string]string) 
{ + fake.getLabelsMutex.Lock() + defer fake.getLabelsMutex.Unlock() + fake.GetLabelsStub = nil + fake.getLabelsReturns = struct { + result1 map[string]string + }{result1} +} + +func (fake *ReenrollInstance) GetLabelsReturnsOnCall(i int, result1 map[string]string) { + fake.getLabelsMutex.Lock() + defer fake.getLabelsMutex.Unlock() + fake.GetLabelsStub = nil + if fake.getLabelsReturnsOnCall == nil { + fake.getLabelsReturnsOnCall = make(map[int]struct { + result1 map[string]string + }) + } + fake.getLabelsReturnsOnCall[i] = struct { + result1 map[string]string + }{result1} +} + +func (fake *ReenrollInstance) GetManagedFields() []v1.ManagedFieldsEntry { + fake.getManagedFieldsMutex.Lock() + ret, specificReturn := fake.getManagedFieldsReturnsOnCall[len(fake.getManagedFieldsArgsForCall)] + fake.getManagedFieldsArgsForCall = append(fake.getManagedFieldsArgsForCall, struct { + }{}) + stub := fake.GetManagedFieldsStub + fakeReturns := fake.getManagedFieldsReturns + fake.recordInvocation("GetManagedFields", []interface{}{}) + fake.getManagedFieldsMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ReenrollInstance) GetManagedFieldsCallCount() int { + fake.getManagedFieldsMutex.RLock() + defer fake.getManagedFieldsMutex.RUnlock() + return len(fake.getManagedFieldsArgsForCall) +} + +func (fake *ReenrollInstance) GetManagedFieldsCalls(stub func() []v1.ManagedFieldsEntry) { + fake.getManagedFieldsMutex.Lock() + defer fake.getManagedFieldsMutex.Unlock() + fake.GetManagedFieldsStub = stub +} + +func (fake *ReenrollInstance) GetManagedFieldsReturns(result1 []v1.ManagedFieldsEntry) { + fake.getManagedFieldsMutex.Lock() + defer fake.getManagedFieldsMutex.Unlock() + fake.GetManagedFieldsStub = nil + fake.getManagedFieldsReturns = struct { + result1 []v1.ManagedFieldsEntry + }{result1} +} + +func (fake *ReenrollInstance) GetManagedFieldsReturnsOnCall(i int, result1 []v1.ManagedFieldsEntry) { + fake.getManagedFieldsMutex.Lock() + defer fake.getManagedFieldsMutex.Unlock() + fake.GetManagedFieldsStub = nil + if fake.getManagedFieldsReturnsOnCall == nil { + fake.getManagedFieldsReturnsOnCall = make(map[int]struct { + result1 []v1.ManagedFieldsEntry + }) + } + fake.getManagedFieldsReturnsOnCall[i] = struct { + result1 []v1.ManagedFieldsEntry + }{result1} +} + +func (fake *ReenrollInstance) GetName() string { + fake.getNameMutex.Lock() + ret, specificReturn := fake.getNameReturnsOnCall[len(fake.getNameArgsForCall)] + fake.getNameArgsForCall = append(fake.getNameArgsForCall, struct { + }{}) + stub := fake.GetNameStub + fakeReturns := fake.getNameReturns + fake.recordInvocation("GetName", []interface{}{}) + fake.getNameMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ReenrollInstance) GetNameCallCount() int { + fake.getNameMutex.RLock() + defer fake.getNameMutex.RUnlock() + return len(fake.getNameArgsForCall) +} + +func (fake *ReenrollInstance) GetNameCalls(stub func() string) { + fake.getNameMutex.Lock() + defer fake.getNameMutex.Unlock() + fake.GetNameStub = stub +} + +func (fake *ReenrollInstance) GetNameReturns(result1 string) { + fake.getNameMutex.Lock() + defer fake.getNameMutex.Unlock() + fake.GetNameStub = nil + fake.getNameReturns = struct { + result1 string + }{result1} +} + +func (fake *ReenrollInstance) GetNameReturnsOnCall(i int, result1 string) { + fake.getNameMutex.Lock() + defer fake.getNameMutex.Unlock() + 
fake.GetNameStub = nil + if fake.getNameReturnsOnCall == nil { + fake.getNameReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getNameReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *ReenrollInstance) GetNamespace() string { + fake.getNamespaceMutex.Lock() + ret, specificReturn := fake.getNamespaceReturnsOnCall[len(fake.getNamespaceArgsForCall)] + fake.getNamespaceArgsForCall = append(fake.getNamespaceArgsForCall, struct { + }{}) + stub := fake.GetNamespaceStub + fakeReturns := fake.getNamespaceReturns + fake.recordInvocation("GetNamespace", []interface{}{}) + fake.getNamespaceMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ReenrollInstance) GetNamespaceCallCount() int { + fake.getNamespaceMutex.RLock() + defer fake.getNamespaceMutex.RUnlock() + return len(fake.getNamespaceArgsForCall) +} + +func (fake *ReenrollInstance) GetNamespaceCalls(stub func() string) { + fake.getNamespaceMutex.Lock() + defer fake.getNamespaceMutex.Unlock() + fake.GetNamespaceStub = stub +} + +func (fake *ReenrollInstance) GetNamespaceReturns(result1 string) { + fake.getNamespaceMutex.Lock() + defer fake.getNamespaceMutex.Unlock() + fake.GetNamespaceStub = nil + fake.getNamespaceReturns = struct { + result1 string + }{result1} +} + +func (fake *ReenrollInstance) GetNamespaceReturnsOnCall(i int, result1 string) { + fake.getNamespaceMutex.Lock() + defer fake.getNamespaceMutex.Unlock() + fake.GetNamespaceStub = nil + if fake.getNamespaceReturnsOnCall == nil { + fake.getNamespaceReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getNamespaceReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *ReenrollInstance) GetObjectKind() schema.ObjectKind { + fake.getObjectKindMutex.Lock() + ret, specificReturn := fake.getObjectKindReturnsOnCall[len(fake.getObjectKindArgsForCall)] + fake.getObjectKindArgsForCall = append(fake.getObjectKindArgsForCall, struct { + }{}) + stub := fake.GetObjectKindStub + fakeReturns := fake.getObjectKindReturns + fake.recordInvocation("GetObjectKind", []interface{}{}) + fake.getObjectKindMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ReenrollInstance) GetObjectKindCallCount() int { + fake.getObjectKindMutex.RLock() + defer fake.getObjectKindMutex.RUnlock() + return len(fake.getObjectKindArgsForCall) +} + +func (fake *ReenrollInstance) GetObjectKindCalls(stub func() schema.ObjectKind) { + fake.getObjectKindMutex.Lock() + defer fake.getObjectKindMutex.Unlock() + fake.GetObjectKindStub = stub +} + +func (fake *ReenrollInstance) GetObjectKindReturns(result1 schema.ObjectKind) { + fake.getObjectKindMutex.Lock() + defer fake.getObjectKindMutex.Unlock() + fake.GetObjectKindStub = nil + fake.getObjectKindReturns = struct { + result1 schema.ObjectKind + }{result1} +} + +func (fake *ReenrollInstance) GetObjectKindReturnsOnCall(i int, result1 schema.ObjectKind) { + fake.getObjectKindMutex.Lock() + defer fake.getObjectKindMutex.Unlock() + fake.GetObjectKindStub = nil + if fake.getObjectKindReturnsOnCall == nil { + fake.getObjectKindReturnsOnCall = make(map[int]struct { + result1 schema.ObjectKind + }) + } + fake.getObjectKindReturnsOnCall[i] = struct { + result1 schema.ObjectKind + }{result1} +} + +func (fake *ReenrollInstance) GetOwnerReferences() []v1.OwnerReference { + fake.getOwnerReferencesMutex.Lock() + ret, 
specificReturn := fake.getOwnerReferencesReturnsOnCall[len(fake.getOwnerReferencesArgsForCall)] + fake.getOwnerReferencesArgsForCall = append(fake.getOwnerReferencesArgsForCall, struct { + }{}) + stub := fake.GetOwnerReferencesStub + fakeReturns := fake.getOwnerReferencesReturns + fake.recordInvocation("GetOwnerReferences", []interface{}{}) + fake.getOwnerReferencesMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ReenrollInstance) GetOwnerReferencesCallCount() int { + fake.getOwnerReferencesMutex.RLock() + defer fake.getOwnerReferencesMutex.RUnlock() + return len(fake.getOwnerReferencesArgsForCall) +} + +func (fake *ReenrollInstance) GetOwnerReferencesCalls(stub func() []v1.OwnerReference) { + fake.getOwnerReferencesMutex.Lock() + defer fake.getOwnerReferencesMutex.Unlock() + fake.GetOwnerReferencesStub = stub +} + +func (fake *ReenrollInstance) GetOwnerReferencesReturns(result1 []v1.OwnerReference) { + fake.getOwnerReferencesMutex.Lock() + defer fake.getOwnerReferencesMutex.Unlock() + fake.GetOwnerReferencesStub = nil + fake.getOwnerReferencesReturns = struct { + result1 []v1.OwnerReference + }{result1} +} + +func (fake *ReenrollInstance) GetOwnerReferencesReturnsOnCall(i int, result1 []v1.OwnerReference) { + fake.getOwnerReferencesMutex.Lock() + defer fake.getOwnerReferencesMutex.Unlock() + fake.GetOwnerReferencesStub = nil + if fake.getOwnerReferencesReturnsOnCall == nil { + fake.getOwnerReferencesReturnsOnCall = make(map[int]struct { + result1 []v1.OwnerReference + }) + } + fake.getOwnerReferencesReturnsOnCall[i] = struct { + result1 []v1.OwnerReference + }{result1} +} + +func (fake *ReenrollInstance) GetResourceVersion() string { + fake.getResourceVersionMutex.Lock() + ret, specificReturn := fake.getResourceVersionReturnsOnCall[len(fake.getResourceVersionArgsForCall)] + fake.getResourceVersionArgsForCall = append(fake.getResourceVersionArgsForCall, struct { + }{}) + stub := fake.GetResourceVersionStub + fakeReturns := fake.getResourceVersionReturns + fake.recordInvocation("GetResourceVersion", []interface{}{}) + fake.getResourceVersionMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ReenrollInstance) GetResourceVersionCallCount() int { + fake.getResourceVersionMutex.RLock() + defer fake.getResourceVersionMutex.RUnlock() + return len(fake.getResourceVersionArgsForCall) +} + +func (fake *ReenrollInstance) GetResourceVersionCalls(stub func() string) { + fake.getResourceVersionMutex.Lock() + defer fake.getResourceVersionMutex.Unlock() + fake.GetResourceVersionStub = stub +} + +func (fake *ReenrollInstance) GetResourceVersionReturns(result1 string) { + fake.getResourceVersionMutex.Lock() + defer fake.getResourceVersionMutex.Unlock() + fake.GetResourceVersionStub = nil + fake.getResourceVersionReturns = struct { + result1 string + }{result1} +} + +func (fake *ReenrollInstance) GetResourceVersionReturnsOnCall(i int, result1 string) { + fake.getResourceVersionMutex.Lock() + defer fake.getResourceVersionMutex.Unlock() + fake.GetResourceVersionStub = nil + if fake.getResourceVersionReturnsOnCall == nil { + fake.getResourceVersionReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getResourceVersionReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *ReenrollInstance) GetSelfLink() string { + fake.getSelfLinkMutex.Lock() + ret, specificReturn := 
fake.getSelfLinkReturnsOnCall[len(fake.getSelfLinkArgsForCall)] + fake.getSelfLinkArgsForCall = append(fake.getSelfLinkArgsForCall, struct { + }{}) + stub := fake.GetSelfLinkStub + fakeReturns := fake.getSelfLinkReturns + fake.recordInvocation("GetSelfLink", []interface{}{}) + fake.getSelfLinkMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ReenrollInstance) GetSelfLinkCallCount() int { + fake.getSelfLinkMutex.RLock() + defer fake.getSelfLinkMutex.RUnlock() + return len(fake.getSelfLinkArgsForCall) +} + +func (fake *ReenrollInstance) GetSelfLinkCalls(stub func() string) { + fake.getSelfLinkMutex.Lock() + defer fake.getSelfLinkMutex.Unlock() + fake.GetSelfLinkStub = stub +} + +func (fake *ReenrollInstance) GetSelfLinkReturns(result1 string) { + fake.getSelfLinkMutex.Lock() + defer fake.getSelfLinkMutex.Unlock() + fake.GetSelfLinkStub = nil + fake.getSelfLinkReturns = struct { + result1 string + }{result1} +} + +func (fake *ReenrollInstance) GetSelfLinkReturnsOnCall(i int, result1 string) { + fake.getSelfLinkMutex.Lock() + defer fake.getSelfLinkMutex.Unlock() + fake.GetSelfLinkStub = nil + if fake.getSelfLinkReturnsOnCall == nil { + fake.getSelfLinkReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getSelfLinkReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *ReenrollInstance) GetUID() types.UID { + fake.getUIDMutex.Lock() + ret, specificReturn := fake.getUIDReturnsOnCall[len(fake.getUIDArgsForCall)] + fake.getUIDArgsForCall = append(fake.getUIDArgsForCall, struct { + }{}) + stub := fake.GetUIDStub + fakeReturns := fake.getUIDReturns + fake.recordInvocation("GetUID", []interface{}{}) + fake.getUIDMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ReenrollInstance) GetUIDCallCount() int { + fake.getUIDMutex.RLock() + defer fake.getUIDMutex.RUnlock() + return len(fake.getUIDArgsForCall) +} + +func (fake *ReenrollInstance) GetUIDCalls(stub func() types.UID) { + fake.getUIDMutex.Lock() + defer fake.getUIDMutex.Unlock() + fake.GetUIDStub = stub +} + +func (fake *ReenrollInstance) GetUIDReturns(result1 types.UID) { + fake.getUIDMutex.Lock() + defer fake.getUIDMutex.Unlock() + fake.GetUIDStub = nil + fake.getUIDReturns = struct { + result1 types.UID + }{result1} +} + +func (fake *ReenrollInstance) GetUIDReturnsOnCall(i int, result1 types.UID) { + fake.getUIDMutex.Lock() + defer fake.getUIDMutex.Unlock() + fake.GetUIDStub = nil + if fake.getUIDReturnsOnCall == nil { + fake.getUIDReturnsOnCall = make(map[int]struct { + result1 types.UID + }) + } + fake.getUIDReturnsOnCall[i] = struct { + result1 types.UID + }{result1} +} + +func (fake *ReenrollInstance) ResetEcertReenroll() { + fake.resetEcertReenrollMutex.Lock() + fake.resetEcertReenrollArgsForCall = append(fake.resetEcertReenrollArgsForCall, struct { + }{}) + stub := fake.ResetEcertReenrollStub + fake.recordInvocation("ResetEcertReenroll", []interface{}{}) + fake.resetEcertReenrollMutex.Unlock() + if stub != nil { + fake.ResetEcertReenrollStub() + } +} + +func (fake *ReenrollInstance) ResetEcertReenrollCallCount() int { + fake.resetEcertReenrollMutex.RLock() + defer fake.resetEcertReenrollMutex.RUnlock() + return len(fake.resetEcertReenrollArgsForCall) +} + +func (fake *ReenrollInstance) ResetEcertReenrollCalls(stub func()) { + fake.resetEcertReenrollMutex.Lock() + defer fake.resetEcertReenrollMutex.Unlock() + 
fake.ResetEcertReenrollStub = stub +} + +func (fake *ReenrollInstance) ResetTLSReenroll() { + fake.resetTLSReenrollMutex.Lock() + fake.resetTLSReenrollArgsForCall = append(fake.resetTLSReenrollArgsForCall, struct { + }{}) + stub := fake.ResetTLSReenrollStub + fake.recordInvocation("ResetTLSReenroll", []interface{}{}) + fake.resetTLSReenrollMutex.Unlock() + if stub != nil { + fake.ResetTLSReenrollStub() + } +} + +func (fake *ReenrollInstance) ResetTLSReenrollCallCount() int { + fake.resetTLSReenrollMutex.RLock() + defer fake.resetTLSReenrollMutex.RUnlock() + return len(fake.resetTLSReenrollArgsForCall) +} + +func (fake *ReenrollInstance) ResetTLSReenrollCalls(stub func()) { + fake.resetTLSReenrollMutex.Lock() + defer fake.resetTLSReenrollMutex.Unlock() + fake.ResetTLSReenrollStub = stub +} + +func (fake *ReenrollInstance) SetAnnotations(arg1 map[string]string) { + fake.setAnnotationsMutex.Lock() + fake.setAnnotationsArgsForCall = append(fake.setAnnotationsArgsForCall, struct { + arg1 map[string]string + }{arg1}) + stub := fake.SetAnnotationsStub + fake.recordInvocation("SetAnnotations", []interface{}{arg1}) + fake.setAnnotationsMutex.Unlock() + if stub != nil { + fake.SetAnnotationsStub(arg1) + } +} + +func (fake *ReenrollInstance) SetAnnotationsCallCount() int { + fake.setAnnotationsMutex.RLock() + defer fake.setAnnotationsMutex.RUnlock() + return len(fake.setAnnotationsArgsForCall) +} + +func (fake *ReenrollInstance) SetAnnotationsCalls(stub func(map[string]string)) { + fake.setAnnotationsMutex.Lock() + defer fake.setAnnotationsMutex.Unlock() + fake.SetAnnotationsStub = stub +} + +func (fake *ReenrollInstance) SetAnnotationsArgsForCall(i int) map[string]string { + fake.setAnnotationsMutex.RLock() + defer fake.setAnnotationsMutex.RUnlock() + argsForCall := fake.setAnnotationsArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *ReenrollInstance) SetClusterName(arg1 string) { + fake.setClusterNameMutex.Lock() + fake.setClusterNameArgsForCall = append(fake.setClusterNameArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetClusterNameStub + fake.recordInvocation("SetClusterName", []interface{}{arg1}) + fake.setClusterNameMutex.Unlock() + if stub != nil { + fake.SetClusterNameStub(arg1) + } +} + +func (fake *ReenrollInstance) SetClusterNameCallCount() int { + fake.setClusterNameMutex.RLock() + defer fake.setClusterNameMutex.RUnlock() + return len(fake.setClusterNameArgsForCall) +} + +func (fake *ReenrollInstance) SetClusterNameCalls(stub func(string)) { + fake.setClusterNameMutex.Lock() + defer fake.setClusterNameMutex.Unlock() + fake.SetClusterNameStub = stub +} + +func (fake *ReenrollInstance) SetClusterNameArgsForCall(i int) string { + fake.setClusterNameMutex.RLock() + defer fake.setClusterNameMutex.RUnlock() + argsForCall := fake.setClusterNameArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *ReenrollInstance) SetCreationTimestamp(arg1 v1.Time) { + fake.setCreationTimestampMutex.Lock() + fake.setCreationTimestampArgsForCall = append(fake.setCreationTimestampArgsForCall, struct { + arg1 v1.Time + }{arg1}) + stub := fake.SetCreationTimestampStub + fake.recordInvocation("SetCreationTimestamp", []interface{}{arg1}) + fake.setCreationTimestampMutex.Unlock() + if stub != nil { + fake.SetCreationTimestampStub(arg1) + } +} + +func (fake *ReenrollInstance) SetCreationTimestampCallCount() int { + fake.setCreationTimestampMutex.RLock() + defer fake.setCreationTimestampMutex.RUnlock() + return len(fake.setCreationTimestampArgsForCall) +} + +func (fake *ReenrollInstance) 
SetCreationTimestampCalls(stub func(v1.Time)) { + fake.setCreationTimestampMutex.Lock() + defer fake.setCreationTimestampMutex.Unlock() + fake.SetCreationTimestampStub = stub +} + +func (fake *ReenrollInstance) SetCreationTimestampArgsForCall(i int) v1.Time { + fake.setCreationTimestampMutex.RLock() + defer fake.setCreationTimestampMutex.RUnlock() + argsForCall := fake.setCreationTimestampArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *ReenrollInstance) SetDeletionGracePeriodSeconds(arg1 *int64) { + fake.setDeletionGracePeriodSecondsMutex.Lock() + fake.setDeletionGracePeriodSecondsArgsForCall = append(fake.setDeletionGracePeriodSecondsArgsForCall, struct { + arg1 *int64 + }{arg1}) + stub := fake.SetDeletionGracePeriodSecondsStub + fake.recordInvocation("SetDeletionGracePeriodSeconds", []interface{}{arg1}) + fake.setDeletionGracePeriodSecondsMutex.Unlock() + if stub != nil { + fake.SetDeletionGracePeriodSecondsStub(arg1) + } +} + +func (fake *ReenrollInstance) SetDeletionGracePeriodSecondsCallCount() int { + fake.setDeletionGracePeriodSecondsMutex.RLock() + defer fake.setDeletionGracePeriodSecondsMutex.RUnlock() + return len(fake.setDeletionGracePeriodSecondsArgsForCall) +} + +func (fake *ReenrollInstance) SetDeletionGracePeriodSecondsCalls(stub func(*int64)) { + fake.setDeletionGracePeriodSecondsMutex.Lock() + defer fake.setDeletionGracePeriodSecondsMutex.Unlock() + fake.SetDeletionGracePeriodSecondsStub = stub +} + +func (fake *ReenrollInstance) SetDeletionGracePeriodSecondsArgsForCall(i int) *int64 { + fake.setDeletionGracePeriodSecondsMutex.RLock() + defer fake.setDeletionGracePeriodSecondsMutex.RUnlock() + argsForCall := fake.setDeletionGracePeriodSecondsArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *ReenrollInstance) SetDeletionTimestamp(arg1 *v1.Time) { + fake.setDeletionTimestampMutex.Lock() + fake.setDeletionTimestampArgsForCall = append(fake.setDeletionTimestampArgsForCall, struct { + arg1 *v1.Time + }{arg1}) + stub := fake.SetDeletionTimestampStub + fake.recordInvocation("SetDeletionTimestamp", []interface{}{arg1}) + fake.setDeletionTimestampMutex.Unlock() + if stub != nil { + fake.SetDeletionTimestampStub(arg1) + } +} + +func (fake *ReenrollInstance) SetDeletionTimestampCallCount() int { + fake.setDeletionTimestampMutex.RLock() + defer fake.setDeletionTimestampMutex.RUnlock() + return len(fake.setDeletionTimestampArgsForCall) +} + +func (fake *ReenrollInstance) SetDeletionTimestampCalls(stub func(*v1.Time)) { + fake.setDeletionTimestampMutex.Lock() + defer fake.setDeletionTimestampMutex.Unlock() + fake.SetDeletionTimestampStub = stub +} + +func (fake *ReenrollInstance) SetDeletionTimestampArgsForCall(i int) *v1.Time { + fake.setDeletionTimestampMutex.RLock() + defer fake.setDeletionTimestampMutex.RUnlock() + argsForCall := fake.setDeletionTimestampArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *ReenrollInstance) SetFinalizers(arg1 []string) { + var arg1Copy []string + if arg1 != nil { + arg1Copy = make([]string, len(arg1)) + copy(arg1Copy, arg1) + } + fake.setFinalizersMutex.Lock() + fake.setFinalizersArgsForCall = append(fake.setFinalizersArgsForCall, struct { + arg1 []string + }{arg1Copy}) + stub := fake.SetFinalizersStub + fake.recordInvocation("SetFinalizers", []interface{}{arg1Copy}) + fake.setFinalizersMutex.Unlock() + if stub != nil { + fake.SetFinalizersStub(arg1) + } +} + +func (fake *ReenrollInstance) SetFinalizersCallCount() int { + fake.setFinalizersMutex.RLock() + defer fake.setFinalizersMutex.RUnlock() + return 
len(fake.setFinalizersArgsForCall) +} + +func (fake *ReenrollInstance) SetFinalizersCalls(stub func([]string)) { + fake.setFinalizersMutex.Lock() + defer fake.setFinalizersMutex.Unlock() + fake.SetFinalizersStub = stub +} + +func (fake *ReenrollInstance) SetFinalizersArgsForCall(i int) []string { + fake.setFinalizersMutex.RLock() + defer fake.setFinalizersMutex.RUnlock() + argsForCall := fake.setFinalizersArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *ReenrollInstance) SetGenerateName(arg1 string) { + fake.setGenerateNameMutex.Lock() + fake.setGenerateNameArgsForCall = append(fake.setGenerateNameArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetGenerateNameStub + fake.recordInvocation("SetGenerateName", []interface{}{arg1}) + fake.setGenerateNameMutex.Unlock() + if stub != nil { + fake.SetGenerateNameStub(arg1) + } +} + +func (fake *ReenrollInstance) SetGenerateNameCallCount() int { + fake.setGenerateNameMutex.RLock() + defer fake.setGenerateNameMutex.RUnlock() + return len(fake.setGenerateNameArgsForCall) +} + +func (fake *ReenrollInstance) SetGenerateNameCalls(stub func(string)) { + fake.setGenerateNameMutex.Lock() + defer fake.setGenerateNameMutex.Unlock() + fake.SetGenerateNameStub = stub +} + +func (fake *ReenrollInstance) SetGenerateNameArgsForCall(i int) string { + fake.setGenerateNameMutex.RLock() + defer fake.setGenerateNameMutex.RUnlock() + argsForCall := fake.setGenerateNameArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *ReenrollInstance) SetGeneration(arg1 int64) { + fake.setGenerationMutex.Lock() + fake.setGenerationArgsForCall = append(fake.setGenerationArgsForCall, struct { + arg1 int64 + }{arg1}) + stub := fake.SetGenerationStub + fake.recordInvocation("SetGeneration", []interface{}{arg1}) + fake.setGenerationMutex.Unlock() + if stub != nil { + fake.SetGenerationStub(arg1) + } +} + +func (fake *ReenrollInstance) SetGenerationCallCount() int { + fake.setGenerationMutex.RLock() + defer fake.setGenerationMutex.RUnlock() + return len(fake.setGenerationArgsForCall) +} + +func (fake *ReenrollInstance) SetGenerationCalls(stub func(int64)) { + fake.setGenerationMutex.Lock() + defer fake.setGenerationMutex.Unlock() + fake.SetGenerationStub = stub +} + +func (fake *ReenrollInstance) SetGenerationArgsForCall(i int) int64 { + fake.setGenerationMutex.RLock() + defer fake.setGenerationMutex.RUnlock() + argsForCall := fake.setGenerationArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *ReenrollInstance) SetLabels(arg1 map[string]string) { + fake.setLabelsMutex.Lock() + fake.setLabelsArgsForCall = append(fake.setLabelsArgsForCall, struct { + arg1 map[string]string + }{arg1}) + stub := fake.SetLabelsStub + fake.recordInvocation("SetLabels", []interface{}{arg1}) + fake.setLabelsMutex.Unlock() + if stub != nil { + fake.SetLabelsStub(arg1) + } +} + +func (fake *ReenrollInstance) SetLabelsCallCount() int { + fake.setLabelsMutex.RLock() + defer fake.setLabelsMutex.RUnlock() + return len(fake.setLabelsArgsForCall) +} + +func (fake *ReenrollInstance) SetLabelsCalls(stub func(map[string]string)) { + fake.setLabelsMutex.Lock() + defer fake.setLabelsMutex.Unlock() + fake.SetLabelsStub = stub +} + +func (fake *ReenrollInstance) SetLabelsArgsForCall(i int) map[string]string { + fake.setLabelsMutex.RLock() + defer fake.setLabelsMutex.RUnlock() + argsForCall := fake.setLabelsArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *ReenrollInstance) SetManagedFields(arg1 []v1.ManagedFieldsEntry) { + var arg1Copy []v1.ManagedFieldsEntry + if arg1 != nil { + 
arg1Copy = make([]v1.ManagedFieldsEntry, len(arg1)) + copy(arg1Copy, arg1) + } + fake.setManagedFieldsMutex.Lock() + fake.setManagedFieldsArgsForCall = append(fake.setManagedFieldsArgsForCall, struct { + arg1 []v1.ManagedFieldsEntry + }{arg1Copy}) + stub := fake.SetManagedFieldsStub + fake.recordInvocation("SetManagedFields", []interface{}{arg1Copy}) + fake.setManagedFieldsMutex.Unlock() + if stub != nil { + fake.SetManagedFieldsStub(arg1) + } +} + +func (fake *ReenrollInstance) SetManagedFieldsCallCount() int { + fake.setManagedFieldsMutex.RLock() + defer fake.setManagedFieldsMutex.RUnlock() + return len(fake.setManagedFieldsArgsForCall) +} + +func (fake *ReenrollInstance) SetManagedFieldsCalls(stub func([]v1.ManagedFieldsEntry)) { + fake.setManagedFieldsMutex.Lock() + defer fake.setManagedFieldsMutex.Unlock() + fake.SetManagedFieldsStub = stub +} + +func (fake *ReenrollInstance) SetManagedFieldsArgsForCall(i int) []v1.ManagedFieldsEntry { + fake.setManagedFieldsMutex.RLock() + defer fake.setManagedFieldsMutex.RUnlock() + argsForCall := fake.setManagedFieldsArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *ReenrollInstance) SetName(arg1 string) { + fake.setNameMutex.Lock() + fake.setNameArgsForCall = append(fake.setNameArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetNameStub + fake.recordInvocation("SetName", []interface{}{arg1}) + fake.setNameMutex.Unlock() + if stub != nil { + fake.SetNameStub(arg1) + } +} + +func (fake *ReenrollInstance) SetNameCallCount() int { + fake.setNameMutex.RLock() + defer fake.setNameMutex.RUnlock() + return len(fake.setNameArgsForCall) +} + +func (fake *ReenrollInstance) SetNameCalls(stub func(string)) { + fake.setNameMutex.Lock() + defer fake.setNameMutex.Unlock() + fake.SetNameStub = stub +} + +func (fake *ReenrollInstance) SetNameArgsForCall(i int) string { + fake.setNameMutex.RLock() + defer fake.setNameMutex.RUnlock() + argsForCall := fake.setNameArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *ReenrollInstance) SetNamespace(arg1 string) { + fake.setNamespaceMutex.Lock() + fake.setNamespaceArgsForCall = append(fake.setNamespaceArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetNamespaceStub + fake.recordInvocation("SetNamespace", []interface{}{arg1}) + fake.setNamespaceMutex.Unlock() + if stub != nil { + fake.SetNamespaceStub(arg1) + } +} + +func (fake *ReenrollInstance) SetNamespaceCallCount() int { + fake.setNamespaceMutex.RLock() + defer fake.setNamespaceMutex.RUnlock() + return len(fake.setNamespaceArgsForCall) +} + +func (fake *ReenrollInstance) SetNamespaceCalls(stub func(string)) { + fake.setNamespaceMutex.Lock() + defer fake.setNamespaceMutex.Unlock() + fake.SetNamespaceStub = stub +} + +func (fake *ReenrollInstance) SetNamespaceArgsForCall(i int) string { + fake.setNamespaceMutex.RLock() + defer fake.setNamespaceMutex.RUnlock() + argsForCall := fake.setNamespaceArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *ReenrollInstance) SetOwnerReferences(arg1 []v1.OwnerReference) { + var arg1Copy []v1.OwnerReference + if arg1 != nil { + arg1Copy = make([]v1.OwnerReference, len(arg1)) + copy(arg1Copy, arg1) + } + fake.setOwnerReferencesMutex.Lock() + fake.setOwnerReferencesArgsForCall = append(fake.setOwnerReferencesArgsForCall, struct { + arg1 []v1.OwnerReference + }{arg1Copy}) + stub := fake.SetOwnerReferencesStub + fake.recordInvocation("SetOwnerReferences", []interface{}{arg1Copy}) + fake.setOwnerReferencesMutex.Unlock() + if stub != nil { + fake.SetOwnerReferencesStub(arg1) + } +} + +func 
(fake *ReenrollInstance) SetOwnerReferencesCallCount() int { + fake.setOwnerReferencesMutex.RLock() + defer fake.setOwnerReferencesMutex.RUnlock() + return len(fake.setOwnerReferencesArgsForCall) +} + +func (fake *ReenrollInstance) SetOwnerReferencesCalls(stub func([]v1.OwnerReference)) { + fake.setOwnerReferencesMutex.Lock() + defer fake.setOwnerReferencesMutex.Unlock() + fake.SetOwnerReferencesStub = stub +} + +func (fake *ReenrollInstance) SetOwnerReferencesArgsForCall(i int) []v1.OwnerReference { + fake.setOwnerReferencesMutex.RLock() + defer fake.setOwnerReferencesMutex.RUnlock() + argsForCall := fake.setOwnerReferencesArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *ReenrollInstance) SetResourceVersion(arg1 string) { + fake.setResourceVersionMutex.Lock() + fake.setResourceVersionArgsForCall = append(fake.setResourceVersionArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetResourceVersionStub + fake.recordInvocation("SetResourceVersion", []interface{}{arg1}) + fake.setResourceVersionMutex.Unlock() + if stub != nil { + fake.SetResourceVersionStub(arg1) + } +} + +func (fake *ReenrollInstance) SetResourceVersionCallCount() int { + fake.setResourceVersionMutex.RLock() + defer fake.setResourceVersionMutex.RUnlock() + return len(fake.setResourceVersionArgsForCall) +} + +func (fake *ReenrollInstance) SetResourceVersionCalls(stub func(string)) { + fake.setResourceVersionMutex.Lock() + defer fake.setResourceVersionMutex.Unlock() + fake.SetResourceVersionStub = stub +} + +func (fake *ReenrollInstance) SetResourceVersionArgsForCall(i int) string { + fake.setResourceVersionMutex.RLock() + defer fake.setResourceVersionMutex.RUnlock() + argsForCall := fake.setResourceVersionArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *ReenrollInstance) SetSelfLink(arg1 string) { + fake.setSelfLinkMutex.Lock() + fake.setSelfLinkArgsForCall = append(fake.setSelfLinkArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetSelfLinkStub + fake.recordInvocation("SetSelfLink", []interface{}{arg1}) + fake.setSelfLinkMutex.Unlock() + if stub != nil { + fake.SetSelfLinkStub(arg1) + } +} + +func (fake *ReenrollInstance) SetSelfLinkCallCount() int { + fake.setSelfLinkMutex.RLock() + defer fake.setSelfLinkMutex.RUnlock() + return len(fake.setSelfLinkArgsForCall) +} + +func (fake *ReenrollInstance) SetSelfLinkCalls(stub func(string)) { + fake.setSelfLinkMutex.Lock() + defer fake.setSelfLinkMutex.Unlock() + fake.SetSelfLinkStub = stub +} + +func (fake *ReenrollInstance) SetSelfLinkArgsForCall(i int) string { + fake.setSelfLinkMutex.RLock() + defer fake.setSelfLinkMutex.RUnlock() + argsForCall := fake.setSelfLinkArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *ReenrollInstance) SetUID(arg1 types.UID) { + fake.setUIDMutex.Lock() + fake.setUIDArgsForCall = append(fake.setUIDArgsForCall, struct { + arg1 types.UID + }{arg1}) + stub := fake.SetUIDStub + fake.recordInvocation("SetUID", []interface{}{arg1}) + fake.setUIDMutex.Unlock() + if stub != nil { + fake.SetUIDStub(arg1) + } +} + +func (fake *ReenrollInstance) SetUIDCallCount() int { + fake.setUIDMutex.RLock() + defer fake.setUIDMutex.RUnlock() + return len(fake.setUIDArgsForCall) +} + +func (fake *ReenrollInstance) SetUIDCalls(stub func(types.UID)) { + fake.setUIDMutex.Lock() + defer fake.setUIDMutex.Unlock() + fake.SetUIDStub = stub +} + +func (fake *ReenrollInstance) SetUIDArgsForCall(i int) types.UID { + fake.setUIDMutex.RLock() + defer fake.setUIDMutex.RUnlock() + argsForCall := fake.setUIDArgsForCall[i] + return 
argsForCall.arg1 +} + +func (fake *ReenrollInstance) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.deepCopyObjectMutex.RLock() + defer fake.deepCopyObjectMutex.RUnlock() + fake.getAnnotationsMutex.RLock() + defer fake.getAnnotationsMutex.RUnlock() + fake.getClusterNameMutex.RLock() + defer fake.getClusterNameMutex.RUnlock() + fake.getCreationTimestampMutex.RLock() + defer fake.getCreationTimestampMutex.RUnlock() + fake.getDeletionGracePeriodSecondsMutex.RLock() + defer fake.getDeletionGracePeriodSecondsMutex.RUnlock() + fake.getDeletionTimestampMutex.RLock() + defer fake.getDeletionTimestampMutex.RUnlock() + fake.getFinalizersMutex.RLock() + defer fake.getFinalizersMutex.RUnlock() + fake.getGenerateNameMutex.RLock() + defer fake.getGenerateNameMutex.RUnlock() + fake.getGenerationMutex.RLock() + defer fake.getGenerationMutex.RUnlock() + fake.getLabelsMutex.RLock() + defer fake.getLabelsMutex.RUnlock() + fake.getManagedFieldsMutex.RLock() + defer fake.getManagedFieldsMutex.RUnlock() + fake.getNameMutex.RLock() + defer fake.getNameMutex.RUnlock() + fake.getNamespaceMutex.RLock() + defer fake.getNamespaceMutex.RUnlock() + fake.getObjectKindMutex.RLock() + defer fake.getObjectKindMutex.RUnlock() + fake.getOwnerReferencesMutex.RLock() + defer fake.getOwnerReferencesMutex.RUnlock() + fake.getResourceVersionMutex.RLock() + defer fake.getResourceVersionMutex.RUnlock() + fake.getSelfLinkMutex.RLock() + defer fake.getSelfLinkMutex.RUnlock() + fake.getUIDMutex.RLock() + defer fake.getUIDMutex.RUnlock() + fake.resetEcertReenrollMutex.RLock() + defer fake.resetEcertReenrollMutex.RUnlock() + fake.resetTLSReenrollMutex.RLock() + defer fake.resetTLSReenrollMutex.RUnlock() + fake.setAnnotationsMutex.RLock() + defer fake.setAnnotationsMutex.RUnlock() + fake.setClusterNameMutex.RLock() + defer fake.setClusterNameMutex.RUnlock() + fake.setCreationTimestampMutex.RLock() + defer fake.setCreationTimestampMutex.RUnlock() + fake.setDeletionGracePeriodSecondsMutex.RLock() + defer fake.setDeletionGracePeriodSecondsMutex.RUnlock() + fake.setDeletionTimestampMutex.RLock() + defer fake.setDeletionTimestampMutex.RUnlock() + fake.setFinalizersMutex.RLock() + defer fake.setFinalizersMutex.RUnlock() + fake.setGenerateNameMutex.RLock() + defer fake.setGenerateNameMutex.RUnlock() + fake.setGenerationMutex.RLock() + defer fake.setGenerationMutex.RUnlock() + fake.setLabelsMutex.RLock() + defer fake.setLabelsMutex.RUnlock() + fake.setManagedFieldsMutex.RLock() + defer fake.setManagedFieldsMutex.RUnlock() + fake.setNameMutex.RLock() + defer fake.setNameMutex.RUnlock() + fake.setNamespaceMutex.RLock() + defer fake.setNamespaceMutex.RUnlock() + fake.setOwnerReferencesMutex.RLock() + defer fake.setOwnerReferencesMutex.RUnlock() + fake.setResourceVersionMutex.RLock() + defer fake.setResourceVersionMutex.RUnlock() + fake.setSelfLinkMutex.RLock() + defer fake.setSelfLinkMutex.RUnlock() + fake.setUIDMutex.RLock() + defer fake.setUIDMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *ReenrollInstance) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = 
append(fake.invocations[key], args) +} + +var _ action.ReenrollInstance = new(ReenrollInstance) diff --git a/pkg/action/mocks/upgradeinstance.go b/pkg/action/mocks/upgradeinstance.go new file mode 100644 index 00000000..8c9a0fcd --- /dev/null +++ b/pkg/action/mocks/upgradeinstance.go @@ -0,0 +1,2045 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package mocks + +import ( + "sync" + + "github.com/IBM-Blockchain/fabric-operator/pkg/action" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" +) + +type UpgradeInstance struct { + DeepCopyObjectStub func() runtime.Object + deepCopyObjectMutex sync.RWMutex + deepCopyObjectArgsForCall []struct { + } + deepCopyObjectReturns struct { + result1 runtime.Object + } + deepCopyObjectReturnsOnCall map[int]struct { + result1 runtime.Object + } + GetAnnotationsStub func() map[string]string + getAnnotationsMutex sync.RWMutex + getAnnotationsArgsForCall []struct { + } + getAnnotationsReturns struct { + result1 map[string]string + } + getAnnotationsReturnsOnCall map[int]struct { + result1 map[string]string + } + GetClusterNameStub func() string + getClusterNameMutex sync.RWMutex + getClusterNameArgsForCall []struct { + } + getClusterNameReturns struct { + result1 string + } + getClusterNameReturnsOnCall map[int]struct { + result1 string + } + GetCreationTimestampStub func() v1.Time + getCreationTimestampMutex sync.RWMutex + getCreationTimestampArgsForCall []struct { + } + getCreationTimestampReturns struct { + result1 v1.Time + } + getCreationTimestampReturnsOnCall map[int]struct { + result1 v1.Time + } + GetDeletionGracePeriodSecondsStub func() *int64 + getDeletionGracePeriodSecondsMutex sync.RWMutex + getDeletionGracePeriodSecondsArgsForCall []struct { + } + getDeletionGracePeriodSecondsReturns struct { + result1 *int64 + } + getDeletionGracePeriodSecondsReturnsOnCall map[int]struct { + result1 *int64 + } + GetDeletionTimestampStub func() *v1.Time + getDeletionTimestampMutex sync.RWMutex + getDeletionTimestampArgsForCall []struct { + } + getDeletionTimestampReturns struct { + result1 *v1.Time + } + getDeletionTimestampReturnsOnCall map[int]struct { + result1 *v1.Time + } + GetFinalizersStub func() []string + getFinalizersMutex sync.RWMutex + getFinalizersArgsForCall []struct { + } + getFinalizersReturns struct { + result1 []string + } + getFinalizersReturnsOnCall map[int]struct { + result1 []string + } + GetGenerateNameStub func() string + getGenerateNameMutex sync.RWMutex + getGenerateNameArgsForCall []struct { + } + getGenerateNameReturns struct { + result1 string + } + getGenerateNameReturnsOnCall map[int]struct { + result1 string + } + GetGenerationStub func() int64 + getGenerationMutex sync.RWMutex + getGenerationArgsForCall []struct { + } + getGenerationReturns struct { + result1 int64 + } + getGenerationReturnsOnCall map[int]struct { + result1 int64 + } + GetLabelsStub func() map[string]string + getLabelsMutex sync.RWMutex + getLabelsArgsForCall []struct { + } + getLabelsReturns struct { + result1 map[string]string + } + getLabelsReturnsOnCall map[int]struct { + result1 map[string]string + } + GetManagedFieldsStub func() []v1.ManagedFieldsEntry + getManagedFieldsMutex sync.RWMutex + getManagedFieldsArgsForCall []struct { + } + getManagedFieldsReturns struct { + result1 []v1.ManagedFieldsEntry + } + getManagedFieldsReturnsOnCall map[int]struct { + result1 []v1.ManagedFieldsEntry + } + GetNameStub func() string + getNameMutex sync.RWMutex + 
getNameArgsForCall []struct { + } + getNameReturns struct { + result1 string + } + getNameReturnsOnCall map[int]struct { + result1 string + } + GetNamespaceStub func() string + getNamespaceMutex sync.RWMutex + getNamespaceArgsForCall []struct { + } + getNamespaceReturns struct { + result1 string + } + getNamespaceReturnsOnCall map[int]struct { + result1 string + } + GetObjectKindStub func() schema.ObjectKind + getObjectKindMutex sync.RWMutex + getObjectKindArgsForCall []struct { + } + getObjectKindReturns struct { + result1 schema.ObjectKind + } + getObjectKindReturnsOnCall map[int]struct { + result1 schema.ObjectKind + } + GetOwnerReferencesStub func() []v1.OwnerReference + getOwnerReferencesMutex sync.RWMutex + getOwnerReferencesArgsForCall []struct { + } + getOwnerReferencesReturns struct { + result1 []v1.OwnerReference + } + getOwnerReferencesReturnsOnCall map[int]struct { + result1 []v1.OwnerReference + } + GetResourceVersionStub func() string + getResourceVersionMutex sync.RWMutex + getResourceVersionArgsForCall []struct { + } + getResourceVersionReturns struct { + result1 string + } + getResourceVersionReturnsOnCall map[int]struct { + result1 string + } + GetSelfLinkStub func() string + getSelfLinkMutex sync.RWMutex + getSelfLinkArgsForCall []struct { + } + getSelfLinkReturns struct { + result1 string + } + getSelfLinkReturnsOnCall map[int]struct { + result1 string + } + GetUIDStub func() types.UID + getUIDMutex sync.RWMutex + getUIDArgsForCall []struct { + } + getUIDReturns struct { + result1 types.UID + } + getUIDReturnsOnCall map[int]struct { + result1 types.UID + } + IsHSMEnabledStub func() bool + isHSMEnabledMutex sync.RWMutex + isHSMEnabledArgsForCall []struct { + } + isHSMEnabledReturns struct { + result1 bool + } + isHSMEnabledReturnsOnCall map[int]struct { + result1 bool + } + SetAnnotationsStub func(map[string]string) + setAnnotationsMutex sync.RWMutex + setAnnotationsArgsForCall []struct { + arg1 map[string]string + } + SetClusterNameStub func(string) + setClusterNameMutex sync.RWMutex + setClusterNameArgsForCall []struct { + arg1 string + } + SetCreationTimestampStub func(v1.Time) + setCreationTimestampMutex sync.RWMutex + setCreationTimestampArgsForCall []struct { + arg1 v1.Time + } + SetDeletionGracePeriodSecondsStub func(*int64) + setDeletionGracePeriodSecondsMutex sync.RWMutex + setDeletionGracePeriodSecondsArgsForCall []struct { + arg1 *int64 + } + SetDeletionTimestampStub func(*v1.Time) + setDeletionTimestampMutex sync.RWMutex + setDeletionTimestampArgsForCall []struct { + arg1 *v1.Time + } + SetFinalizersStub func([]string) + setFinalizersMutex sync.RWMutex + setFinalizersArgsForCall []struct { + arg1 []string + } + SetGenerateNameStub func(string) + setGenerateNameMutex sync.RWMutex + setGenerateNameArgsForCall []struct { + arg1 string + } + SetGenerationStub func(int64) + setGenerationMutex sync.RWMutex + setGenerationArgsForCall []struct { + arg1 int64 + } + SetLabelsStub func(map[string]string) + setLabelsMutex sync.RWMutex + setLabelsArgsForCall []struct { + arg1 map[string]string + } + SetManagedFieldsStub func([]v1.ManagedFieldsEntry) + setManagedFieldsMutex sync.RWMutex + setManagedFieldsArgsForCall []struct { + arg1 []v1.ManagedFieldsEntry + } + SetNameStub func(string) + setNameMutex sync.RWMutex + setNameArgsForCall []struct { + arg1 string + } + SetNamespaceStub func(string) + setNamespaceMutex sync.RWMutex + setNamespaceArgsForCall []struct { + arg1 string + } + SetOwnerReferencesStub func([]v1.OwnerReference) + setOwnerReferencesMutex sync.RWMutex + 
setOwnerReferencesArgsForCall []struct { + arg1 []v1.OwnerReference + } + SetResourceVersionStub func(string) + setResourceVersionMutex sync.RWMutex + setResourceVersionArgsForCall []struct { + arg1 string + } + SetSelfLinkStub func(string) + setSelfLinkMutex sync.RWMutex + setSelfLinkArgsForCall []struct { + arg1 string + } + SetUIDStub func(types.UID) + setUIDMutex sync.RWMutex + setUIDArgsForCall []struct { + arg1 types.UID + } + UsingCouchDBStub func() bool + usingCouchDBMutex sync.RWMutex + usingCouchDBArgsForCall []struct { + } + usingCouchDBReturns struct { + result1 bool + } + usingCouchDBReturnsOnCall map[int]struct { + result1 bool + } + UsingHSMProxyStub func() bool + usingHSMProxyMutex sync.RWMutex + usingHSMProxyArgsForCall []struct { + } + usingHSMProxyReturns struct { + result1 bool + } + usingHSMProxyReturnsOnCall map[int]struct { + result1 bool + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *UpgradeInstance) DeepCopyObject() runtime.Object { + fake.deepCopyObjectMutex.Lock() + ret, specificReturn := fake.deepCopyObjectReturnsOnCall[len(fake.deepCopyObjectArgsForCall)] + fake.deepCopyObjectArgsForCall = append(fake.deepCopyObjectArgsForCall, struct { + }{}) + stub := fake.DeepCopyObjectStub + fakeReturns := fake.deepCopyObjectReturns + fake.recordInvocation("DeepCopyObject", []interface{}{}) + fake.deepCopyObjectMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *UpgradeInstance) DeepCopyObjectCallCount() int { + fake.deepCopyObjectMutex.RLock() + defer fake.deepCopyObjectMutex.RUnlock() + return len(fake.deepCopyObjectArgsForCall) +} + +func (fake *UpgradeInstance) DeepCopyObjectCalls(stub func() runtime.Object) { + fake.deepCopyObjectMutex.Lock() + defer fake.deepCopyObjectMutex.Unlock() + fake.DeepCopyObjectStub = stub +} + +func (fake *UpgradeInstance) DeepCopyObjectReturns(result1 runtime.Object) { + fake.deepCopyObjectMutex.Lock() + defer fake.deepCopyObjectMutex.Unlock() + fake.DeepCopyObjectStub = nil + fake.deepCopyObjectReturns = struct { + result1 runtime.Object + }{result1} +} + +func (fake *UpgradeInstance) DeepCopyObjectReturnsOnCall(i int, result1 runtime.Object) { + fake.deepCopyObjectMutex.Lock() + defer fake.deepCopyObjectMutex.Unlock() + fake.DeepCopyObjectStub = nil + if fake.deepCopyObjectReturnsOnCall == nil { + fake.deepCopyObjectReturnsOnCall = make(map[int]struct { + result1 runtime.Object + }) + } + fake.deepCopyObjectReturnsOnCall[i] = struct { + result1 runtime.Object + }{result1} +} + +func (fake *UpgradeInstance) GetAnnotations() map[string]string { + fake.getAnnotationsMutex.Lock() + ret, specificReturn := fake.getAnnotationsReturnsOnCall[len(fake.getAnnotationsArgsForCall)] + fake.getAnnotationsArgsForCall = append(fake.getAnnotationsArgsForCall, struct { + }{}) + stub := fake.GetAnnotationsStub + fakeReturns := fake.getAnnotationsReturns + fake.recordInvocation("GetAnnotations", []interface{}{}) + fake.getAnnotationsMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *UpgradeInstance) GetAnnotationsCallCount() int { + fake.getAnnotationsMutex.RLock() + defer fake.getAnnotationsMutex.RUnlock() + return len(fake.getAnnotationsArgsForCall) +} + +func (fake *UpgradeInstance) GetAnnotationsCalls(stub func() map[string]string) { + fake.getAnnotationsMutex.Lock() + defer fake.getAnnotationsMutex.Unlock() + 
fake.GetAnnotationsStub = stub +} + +func (fake *UpgradeInstance) GetAnnotationsReturns(result1 map[string]string) { + fake.getAnnotationsMutex.Lock() + defer fake.getAnnotationsMutex.Unlock() + fake.GetAnnotationsStub = nil + fake.getAnnotationsReturns = struct { + result1 map[string]string + }{result1} +} + +func (fake *UpgradeInstance) GetAnnotationsReturnsOnCall(i int, result1 map[string]string) { + fake.getAnnotationsMutex.Lock() + defer fake.getAnnotationsMutex.Unlock() + fake.GetAnnotationsStub = nil + if fake.getAnnotationsReturnsOnCall == nil { + fake.getAnnotationsReturnsOnCall = make(map[int]struct { + result1 map[string]string + }) + } + fake.getAnnotationsReturnsOnCall[i] = struct { + result1 map[string]string + }{result1} +} + +func (fake *UpgradeInstance) GetClusterName() string { + fake.getClusterNameMutex.Lock() + ret, specificReturn := fake.getClusterNameReturnsOnCall[len(fake.getClusterNameArgsForCall)] + fake.getClusterNameArgsForCall = append(fake.getClusterNameArgsForCall, struct { + }{}) + stub := fake.GetClusterNameStub + fakeReturns := fake.getClusterNameReturns + fake.recordInvocation("GetClusterName", []interface{}{}) + fake.getClusterNameMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *UpgradeInstance) GetClusterNameCallCount() int { + fake.getClusterNameMutex.RLock() + defer fake.getClusterNameMutex.RUnlock() + return len(fake.getClusterNameArgsForCall) +} + +func (fake *UpgradeInstance) GetClusterNameCalls(stub func() string) { + fake.getClusterNameMutex.Lock() + defer fake.getClusterNameMutex.Unlock() + fake.GetClusterNameStub = stub +} + +func (fake *UpgradeInstance) GetClusterNameReturns(result1 string) { + fake.getClusterNameMutex.Lock() + defer fake.getClusterNameMutex.Unlock() + fake.GetClusterNameStub = nil + fake.getClusterNameReturns = struct { + result1 string + }{result1} +} + +func (fake *UpgradeInstance) GetClusterNameReturnsOnCall(i int, result1 string) { + fake.getClusterNameMutex.Lock() + defer fake.getClusterNameMutex.Unlock() + fake.GetClusterNameStub = nil + if fake.getClusterNameReturnsOnCall == nil { + fake.getClusterNameReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getClusterNameReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *UpgradeInstance) GetCreationTimestamp() v1.Time { + fake.getCreationTimestampMutex.Lock() + ret, specificReturn := fake.getCreationTimestampReturnsOnCall[len(fake.getCreationTimestampArgsForCall)] + fake.getCreationTimestampArgsForCall = append(fake.getCreationTimestampArgsForCall, struct { + }{}) + stub := fake.GetCreationTimestampStub + fakeReturns := fake.getCreationTimestampReturns + fake.recordInvocation("GetCreationTimestamp", []interface{}{}) + fake.getCreationTimestampMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *UpgradeInstance) GetCreationTimestampCallCount() int { + fake.getCreationTimestampMutex.RLock() + defer fake.getCreationTimestampMutex.RUnlock() + return len(fake.getCreationTimestampArgsForCall) +} + +func (fake *UpgradeInstance) GetCreationTimestampCalls(stub func() v1.Time) { + fake.getCreationTimestampMutex.Lock() + defer fake.getCreationTimestampMutex.Unlock() + fake.GetCreationTimestampStub = stub +} + +func (fake *UpgradeInstance) GetCreationTimestampReturns(result1 v1.Time) { + fake.getCreationTimestampMutex.Lock() + defer 
fake.getCreationTimestampMutex.Unlock() + fake.GetCreationTimestampStub = nil + fake.getCreationTimestampReturns = struct { + result1 v1.Time + }{result1} +} + +func (fake *UpgradeInstance) GetCreationTimestampReturnsOnCall(i int, result1 v1.Time) { + fake.getCreationTimestampMutex.Lock() + defer fake.getCreationTimestampMutex.Unlock() + fake.GetCreationTimestampStub = nil + if fake.getCreationTimestampReturnsOnCall == nil { + fake.getCreationTimestampReturnsOnCall = make(map[int]struct { + result1 v1.Time + }) + } + fake.getCreationTimestampReturnsOnCall[i] = struct { + result1 v1.Time + }{result1} +} + +func (fake *UpgradeInstance) GetDeletionGracePeriodSeconds() *int64 { + fake.getDeletionGracePeriodSecondsMutex.Lock() + ret, specificReturn := fake.getDeletionGracePeriodSecondsReturnsOnCall[len(fake.getDeletionGracePeriodSecondsArgsForCall)] + fake.getDeletionGracePeriodSecondsArgsForCall = append(fake.getDeletionGracePeriodSecondsArgsForCall, struct { + }{}) + stub := fake.GetDeletionGracePeriodSecondsStub + fakeReturns := fake.getDeletionGracePeriodSecondsReturns + fake.recordInvocation("GetDeletionGracePeriodSeconds", []interface{}{}) + fake.getDeletionGracePeriodSecondsMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *UpgradeInstance) GetDeletionGracePeriodSecondsCallCount() int { + fake.getDeletionGracePeriodSecondsMutex.RLock() + defer fake.getDeletionGracePeriodSecondsMutex.RUnlock() + return len(fake.getDeletionGracePeriodSecondsArgsForCall) +} + +func (fake *UpgradeInstance) GetDeletionGracePeriodSecondsCalls(stub func() *int64) { + fake.getDeletionGracePeriodSecondsMutex.Lock() + defer fake.getDeletionGracePeriodSecondsMutex.Unlock() + fake.GetDeletionGracePeriodSecondsStub = stub +} + +func (fake *UpgradeInstance) GetDeletionGracePeriodSecondsReturns(result1 *int64) { + fake.getDeletionGracePeriodSecondsMutex.Lock() + defer fake.getDeletionGracePeriodSecondsMutex.Unlock() + fake.GetDeletionGracePeriodSecondsStub = nil + fake.getDeletionGracePeriodSecondsReturns = struct { + result1 *int64 + }{result1} +} + +func (fake *UpgradeInstance) GetDeletionGracePeriodSecondsReturnsOnCall(i int, result1 *int64) { + fake.getDeletionGracePeriodSecondsMutex.Lock() + defer fake.getDeletionGracePeriodSecondsMutex.Unlock() + fake.GetDeletionGracePeriodSecondsStub = nil + if fake.getDeletionGracePeriodSecondsReturnsOnCall == nil { + fake.getDeletionGracePeriodSecondsReturnsOnCall = make(map[int]struct { + result1 *int64 + }) + } + fake.getDeletionGracePeriodSecondsReturnsOnCall[i] = struct { + result1 *int64 + }{result1} +} + +func (fake *UpgradeInstance) GetDeletionTimestamp() *v1.Time { + fake.getDeletionTimestampMutex.Lock() + ret, specificReturn := fake.getDeletionTimestampReturnsOnCall[len(fake.getDeletionTimestampArgsForCall)] + fake.getDeletionTimestampArgsForCall = append(fake.getDeletionTimestampArgsForCall, struct { + }{}) + stub := fake.GetDeletionTimestampStub + fakeReturns := fake.getDeletionTimestampReturns + fake.recordInvocation("GetDeletionTimestamp", []interface{}{}) + fake.getDeletionTimestampMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *UpgradeInstance) GetDeletionTimestampCallCount() int { + fake.getDeletionTimestampMutex.RLock() + defer fake.getDeletionTimestampMutex.RUnlock() + return len(fake.getDeletionTimestampArgsForCall) +} + +func (fake *UpgradeInstance) 
GetDeletionTimestampCalls(stub func() *v1.Time) { + fake.getDeletionTimestampMutex.Lock() + defer fake.getDeletionTimestampMutex.Unlock() + fake.GetDeletionTimestampStub = stub +} + +func (fake *UpgradeInstance) GetDeletionTimestampReturns(result1 *v1.Time) { + fake.getDeletionTimestampMutex.Lock() + defer fake.getDeletionTimestampMutex.Unlock() + fake.GetDeletionTimestampStub = nil + fake.getDeletionTimestampReturns = struct { + result1 *v1.Time + }{result1} +} + +func (fake *UpgradeInstance) GetDeletionTimestampReturnsOnCall(i int, result1 *v1.Time) { + fake.getDeletionTimestampMutex.Lock() + defer fake.getDeletionTimestampMutex.Unlock() + fake.GetDeletionTimestampStub = nil + if fake.getDeletionTimestampReturnsOnCall == nil { + fake.getDeletionTimestampReturnsOnCall = make(map[int]struct { + result1 *v1.Time + }) + } + fake.getDeletionTimestampReturnsOnCall[i] = struct { + result1 *v1.Time + }{result1} +} + +func (fake *UpgradeInstance) GetFinalizers() []string { + fake.getFinalizersMutex.Lock() + ret, specificReturn := fake.getFinalizersReturnsOnCall[len(fake.getFinalizersArgsForCall)] + fake.getFinalizersArgsForCall = append(fake.getFinalizersArgsForCall, struct { + }{}) + stub := fake.GetFinalizersStub + fakeReturns := fake.getFinalizersReturns + fake.recordInvocation("GetFinalizers", []interface{}{}) + fake.getFinalizersMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *UpgradeInstance) GetFinalizersCallCount() int { + fake.getFinalizersMutex.RLock() + defer fake.getFinalizersMutex.RUnlock() + return len(fake.getFinalizersArgsForCall) +} + +func (fake *UpgradeInstance) GetFinalizersCalls(stub func() []string) { + fake.getFinalizersMutex.Lock() + defer fake.getFinalizersMutex.Unlock() + fake.GetFinalizersStub = stub +} + +func (fake *UpgradeInstance) GetFinalizersReturns(result1 []string) { + fake.getFinalizersMutex.Lock() + defer fake.getFinalizersMutex.Unlock() + fake.GetFinalizersStub = nil + fake.getFinalizersReturns = struct { + result1 []string + }{result1} +} + +func (fake *UpgradeInstance) GetFinalizersReturnsOnCall(i int, result1 []string) { + fake.getFinalizersMutex.Lock() + defer fake.getFinalizersMutex.Unlock() + fake.GetFinalizersStub = nil + if fake.getFinalizersReturnsOnCall == nil { + fake.getFinalizersReturnsOnCall = make(map[int]struct { + result1 []string + }) + } + fake.getFinalizersReturnsOnCall[i] = struct { + result1 []string + }{result1} +} + +func (fake *UpgradeInstance) GetGenerateName() string { + fake.getGenerateNameMutex.Lock() + ret, specificReturn := fake.getGenerateNameReturnsOnCall[len(fake.getGenerateNameArgsForCall)] + fake.getGenerateNameArgsForCall = append(fake.getGenerateNameArgsForCall, struct { + }{}) + stub := fake.GetGenerateNameStub + fakeReturns := fake.getGenerateNameReturns + fake.recordInvocation("GetGenerateName", []interface{}{}) + fake.getGenerateNameMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *UpgradeInstance) GetGenerateNameCallCount() int { + fake.getGenerateNameMutex.RLock() + defer fake.getGenerateNameMutex.RUnlock() + return len(fake.getGenerateNameArgsForCall) +} + +func (fake *UpgradeInstance) GetGenerateNameCalls(stub func() string) { + fake.getGenerateNameMutex.Lock() + defer fake.getGenerateNameMutex.Unlock() + fake.GetGenerateNameStub = stub +} + +func (fake *UpgradeInstance) GetGenerateNameReturns(result1 string) { + 
fake.getGenerateNameMutex.Lock() + defer fake.getGenerateNameMutex.Unlock() + fake.GetGenerateNameStub = nil + fake.getGenerateNameReturns = struct { + result1 string + }{result1} +} + +func (fake *UpgradeInstance) GetGenerateNameReturnsOnCall(i int, result1 string) { + fake.getGenerateNameMutex.Lock() + defer fake.getGenerateNameMutex.Unlock() + fake.GetGenerateNameStub = nil + if fake.getGenerateNameReturnsOnCall == nil { + fake.getGenerateNameReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getGenerateNameReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *UpgradeInstance) GetGeneration() int64 { + fake.getGenerationMutex.Lock() + ret, specificReturn := fake.getGenerationReturnsOnCall[len(fake.getGenerationArgsForCall)] + fake.getGenerationArgsForCall = append(fake.getGenerationArgsForCall, struct { + }{}) + stub := fake.GetGenerationStub + fakeReturns := fake.getGenerationReturns + fake.recordInvocation("GetGeneration", []interface{}{}) + fake.getGenerationMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *UpgradeInstance) GetGenerationCallCount() int { + fake.getGenerationMutex.RLock() + defer fake.getGenerationMutex.RUnlock() + return len(fake.getGenerationArgsForCall) +} + +func (fake *UpgradeInstance) GetGenerationCalls(stub func() int64) { + fake.getGenerationMutex.Lock() + defer fake.getGenerationMutex.Unlock() + fake.GetGenerationStub = stub +} + +func (fake *UpgradeInstance) GetGenerationReturns(result1 int64) { + fake.getGenerationMutex.Lock() + defer fake.getGenerationMutex.Unlock() + fake.GetGenerationStub = nil + fake.getGenerationReturns = struct { + result1 int64 + }{result1} +} + +func (fake *UpgradeInstance) GetGenerationReturnsOnCall(i int, result1 int64) { + fake.getGenerationMutex.Lock() + defer fake.getGenerationMutex.Unlock() + fake.GetGenerationStub = nil + if fake.getGenerationReturnsOnCall == nil { + fake.getGenerationReturnsOnCall = make(map[int]struct { + result1 int64 + }) + } + fake.getGenerationReturnsOnCall[i] = struct { + result1 int64 + }{result1} +} + +func (fake *UpgradeInstance) GetLabels() map[string]string { + fake.getLabelsMutex.Lock() + ret, specificReturn := fake.getLabelsReturnsOnCall[len(fake.getLabelsArgsForCall)] + fake.getLabelsArgsForCall = append(fake.getLabelsArgsForCall, struct { + }{}) + stub := fake.GetLabelsStub + fakeReturns := fake.getLabelsReturns + fake.recordInvocation("GetLabels", []interface{}{}) + fake.getLabelsMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *UpgradeInstance) GetLabelsCallCount() int { + fake.getLabelsMutex.RLock() + defer fake.getLabelsMutex.RUnlock() + return len(fake.getLabelsArgsForCall) +} + +func (fake *UpgradeInstance) GetLabelsCalls(stub func() map[string]string) { + fake.getLabelsMutex.Lock() + defer fake.getLabelsMutex.Unlock() + fake.GetLabelsStub = stub +} + +func (fake *UpgradeInstance) GetLabelsReturns(result1 map[string]string) { + fake.getLabelsMutex.Lock() + defer fake.getLabelsMutex.Unlock() + fake.GetLabelsStub = nil + fake.getLabelsReturns = struct { + result1 map[string]string + }{result1} +} + +func (fake *UpgradeInstance) GetLabelsReturnsOnCall(i int, result1 map[string]string) { + fake.getLabelsMutex.Lock() + defer fake.getLabelsMutex.Unlock() + fake.GetLabelsStub = nil + if fake.getLabelsReturnsOnCall == nil { + fake.getLabelsReturnsOnCall = 
make(map[int]struct { + result1 map[string]string + }) + } + fake.getLabelsReturnsOnCall[i] = struct { + result1 map[string]string + }{result1} +} + +func (fake *UpgradeInstance) GetManagedFields() []v1.ManagedFieldsEntry { + fake.getManagedFieldsMutex.Lock() + ret, specificReturn := fake.getManagedFieldsReturnsOnCall[len(fake.getManagedFieldsArgsForCall)] + fake.getManagedFieldsArgsForCall = append(fake.getManagedFieldsArgsForCall, struct { + }{}) + stub := fake.GetManagedFieldsStub + fakeReturns := fake.getManagedFieldsReturns + fake.recordInvocation("GetManagedFields", []interface{}{}) + fake.getManagedFieldsMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *UpgradeInstance) GetManagedFieldsCallCount() int { + fake.getManagedFieldsMutex.RLock() + defer fake.getManagedFieldsMutex.RUnlock() + return len(fake.getManagedFieldsArgsForCall) +} + +func (fake *UpgradeInstance) GetManagedFieldsCalls(stub func() []v1.ManagedFieldsEntry) { + fake.getManagedFieldsMutex.Lock() + defer fake.getManagedFieldsMutex.Unlock() + fake.GetManagedFieldsStub = stub +} + +func (fake *UpgradeInstance) GetManagedFieldsReturns(result1 []v1.ManagedFieldsEntry) { + fake.getManagedFieldsMutex.Lock() + defer fake.getManagedFieldsMutex.Unlock() + fake.GetManagedFieldsStub = nil + fake.getManagedFieldsReturns = struct { + result1 []v1.ManagedFieldsEntry + }{result1} +} + +func (fake *UpgradeInstance) GetManagedFieldsReturnsOnCall(i int, result1 []v1.ManagedFieldsEntry) { + fake.getManagedFieldsMutex.Lock() + defer fake.getManagedFieldsMutex.Unlock() + fake.GetManagedFieldsStub = nil + if fake.getManagedFieldsReturnsOnCall == nil { + fake.getManagedFieldsReturnsOnCall = make(map[int]struct { + result1 []v1.ManagedFieldsEntry + }) + } + fake.getManagedFieldsReturnsOnCall[i] = struct { + result1 []v1.ManagedFieldsEntry + }{result1} +} + +func (fake *UpgradeInstance) GetName() string { + fake.getNameMutex.Lock() + ret, specificReturn := fake.getNameReturnsOnCall[len(fake.getNameArgsForCall)] + fake.getNameArgsForCall = append(fake.getNameArgsForCall, struct { + }{}) + stub := fake.GetNameStub + fakeReturns := fake.getNameReturns + fake.recordInvocation("GetName", []interface{}{}) + fake.getNameMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *UpgradeInstance) GetNameCallCount() int { + fake.getNameMutex.RLock() + defer fake.getNameMutex.RUnlock() + return len(fake.getNameArgsForCall) +} + +func (fake *UpgradeInstance) GetNameCalls(stub func() string) { + fake.getNameMutex.Lock() + defer fake.getNameMutex.Unlock() + fake.GetNameStub = stub +} + +func (fake *UpgradeInstance) GetNameReturns(result1 string) { + fake.getNameMutex.Lock() + defer fake.getNameMutex.Unlock() + fake.GetNameStub = nil + fake.getNameReturns = struct { + result1 string + }{result1} +} + +func (fake *UpgradeInstance) GetNameReturnsOnCall(i int, result1 string) { + fake.getNameMutex.Lock() + defer fake.getNameMutex.Unlock() + fake.GetNameStub = nil + if fake.getNameReturnsOnCall == nil { + fake.getNameReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getNameReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *UpgradeInstance) GetNamespace() string { + fake.getNamespaceMutex.Lock() + ret, specificReturn := fake.getNamespaceReturnsOnCall[len(fake.getNamespaceArgsForCall)] + fake.getNamespaceArgsForCall = 
append(fake.getNamespaceArgsForCall, struct { + }{}) + stub := fake.GetNamespaceStub + fakeReturns := fake.getNamespaceReturns + fake.recordInvocation("GetNamespace", []interface{}{}) + fake.getNamespaceMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *UpgradeInstance) GetNamespaceCallCount() int { + fake.getNamespaceMutex.RLock() + defer fake.getNamespaceMutex.RUnlock() + return len(fake.getNamespaceArgsForCall) +} + +func (fake *UpgradeInstance) GetNamespaceCalls(stub func() string) { + fake.getNamespaceMutex.Lock() + defer fake.getNamespaceMutex.Unlock() + fake.GetNamespaceStub = stub +} + +func (fake *UpgradeInstance) GetNamespaceReturns(result1 string) { + fake.getNamespaceMutex.Lock() + defer fake.getNamespaceMutex.Unlock() + fake.GetNamespaceStub = nil + fake.getNamespaceReturns = struct { + result1 string + }{result1} +} + +func (fake *UpgradeInstance) GetNamespaceReturnsOnCall(i int, result1 string) { + fake.getNamespaceMutex.Lock() + defer fake.getNamespaceMutex.Unlock() + fake.GetNamespaceStub = nil + if fake.getNamespaceReturnsOnCall == nil { + fake.getNamespaceReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getNamespaceReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *UpgradeInstance) GetObjectKind() schema.ObjectKind { + fake.getObjectKindMutex.Lock() + ret, specificReturn := fake.getObjectKindReturnsOnCall[len(fake.getObjectKindArgsForCall)] + fake.getObjectKindArgsForCall = append(fake.getObjectKindArgsForCall, struct { + }{}) + stub := fake.GetObjectKindStub + fakeReturns := fake.getObjectKindReturns + fake.recordInvocation("GetObjectKind", []interface{}{}) + fake.getObjectKindMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *UpgradeInstance) GetObjectKindCallCount() int { + fake.getObjectKindMutex.RLock() + defer fake.getObjectKindMutex.RUnlock() + return len(fake.getObjectKindArgsForCall) +} + +func (fake *UpgradeInstance) GetObjectKindCalls(stub func() schema.ObjectKind) { + fake.getObjectKindMutex.Lock() + defer fake.getObjectKindMutex.Unlock() + fake.GetObjectKindStub = stub +} + +func (fake *UpgradeInstance) GetObjectKindReturns(result1 schema.ObjectKind) { + fake.getObjectKindMutex.Lock() + defer fake.getObjectKindMutex.Unlock() + fake.GetObjectKindStub = nil + fake.getObjectKindReturns = struct { + result1 schema.ObjectKind + }{result1} +} + +func (fake *UpgradeInstance) GetObjectKindReturnsOnCall(i int, result1 schema.ObjectKind) { + fake.getObjectKindMutex.Lock() + defer fake.getObjectKindMutex.Unlock() + fake.GetObjectKindStub = nil + if fake.getObjectKindReturnsOnCall == nil { + fake.getObjectKindReturnsOnCall = make(map[int]struct { + result1 schema.ObjectKind + }) + } + fake.getObjectKindReturnsOnCall[i] = struct { + result1 schema.ObjectKind + }{result1} +} + +func (fake *UpgradeInstance) GetOwnerReferences() []v1.OwnerReference { + fake.getOwnerReferencesMutex.Lock() + ret, specificReturn := fake.getOwnerReferencesReturnsOnCall[len(fake.getOwnerReferencesArgsForCall)] + fake.getOwnerReferencesArgsForCall = append(fake.getOwnerReferencesArgsForCall, struct { + }{}) + stub := fake.GetOwnerReferencesStub + fakeReturns := fake.getOwnerReferencesReturns + fake.recordInvocation("GetOwnerReferences", []interface{}{}) + fake.getOwnerReferencesMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + 
return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *UpgradeInstance) GetOwnerReferencesCallCount() int { + fake.getOwnerReferencesMutex.RLock() + defer fake.getOwnerReferencesMutex.RUnlock() + return len(fake.getOwnerReferencesArgsForCall) +} + +func (fake *UpgradeInstance) GetOwnerReferencesCalls(stub func() []v1.OwnerReference) { + fake.getOwnerReferencesMutex.Lock() + defer fake.getOwnerReferencesMutex.Unlock() + fake.GetOwnerReferencesStub = stub +} + +func (fake *UpgradeInstance) GetOwnerReferencesReturns(result1 []v1.OwnerReference) { + fake.getOwnerReferencesMutex.Lock() + defer fake.getOwnerReferencesMutex.Unlock() + fake.GetOwnerReferencesStub = nil + fake.getOwnerReferencesReturns = struct { + result1 []v1.OwnerReference + }{result1} +} + +func (fake *UpgradeInstance) GetOwnerReferencesReturnsOnCall(i int, result1 []v1.OwnerReference) { + fake.getOwnerReferencesMutex.Lock() + defer fake.getOwnerReferencesMutex.Unlock() + fake.GetOwnerReferencesStub = nil + if fake.getOwnerReferencesReturnsOnCall == nil { + fake.getOwnerReferencesReturnsOnCall = make(map[int]struct { + result1 []v1.OwnerReference + }) + } + fake.getOwnerReferencesReturnsOnCall[i] = struct { + result1 []v1.OwnerReference + }{result1} +} + +func (fake *UpgradeInstance) GetResourceVersion() string { + fake.getResourceVersionMutex.Lock() + ret, specificReturn := fake.getResourceVersionReturnsOnCall[len(fake.getResourceVersionArgsForCall)] + fake.getResourceVersionArgsForCall = append(fake.getResourceVersionArgsForCall, struct { + }{}) + stub := fake.GetResourceVersionStub + fakeReturns := fake.getResourceVersionReturns + fake.recordInvocation("GetResourceVersion", []interface{}{}) + fake.getResourceVersionMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *UpgradeInstance) GetResourceVersionCallCount() int { + fake.getResourceVersionMutex.RLock() + defer fake.getResourceVersionMutex.RUnlock() + return len(fake.getResourceVersionArgsForCall) +} + +func (fake *UpgradeInstance) GetResourceVersionCalls(stub func() string) { + fake.getResourceVersionMutex.Lock() + defer fake.getResourceVersionMutex.Unlock() + fake.GetResourceVersionStub = stub +} + +func (fake *UpgradeInstance) GetResourceVersionReturns(result1 string) { + fake.getResourceVersionMutex.Lock() + defer fake.getResourceVersionMutex.Unlock() + fake.GetResourceVersionStub = nil + fake.getResourceVersionReturns = struct { + result1 string + }{result1} +} + +func (fake *UpgradeInstance) GetResourceVersionReturnsOnCall(i int, result1 string) { + fake.getResourceVersionMutex.Lock() + defer fake.getResourceVersionMutex.Unlock() + fake.GetResourceVersionStub = nil + if fake.getResourceVersionReturnsOnCall == nil { + fake.getResourceVersionReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getResourceVersionReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *UpgradeInstance) GetSelfLink() string { + fake.getSelfLinkMutex.Lock() + ret, specificReturn := fake.getSelfLinkReturnsOnCall[len(fake.getSelfLinkArgsForCall)] + fake.getSelfLinkArgsForCall = append(fake.getSelfLinkArgsForCall, struct { + }{}) + stub := fake.GetSelfLinkStub + fakeReturns := fake.getSelfLinkReturns + fake.recordInvocation("GetSelfLink", []interface{}{}) + fake.getSelfLinkMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *UpgradeInstance) 
GetSelfLinkCallCount() int { + fake.getSelfLinkMutex.RLock() + defer fake.getSelfLinkMutex.RUnlock() + return len(fake.getSelfLinkArgsForCall) +} + +func (fake *UpgradeInstance) GetSelfLinkCalls(stub func() string) { + fake.getSelfLinkMutex.Lock() + defer fake.getSelfLinkMutex.Unlock() + fake.GetSelfLinkStub = stub +} + +func (fake *UpgradeInstance) GetSelfLinkReturns(result1 string) { + fake.getSelfLinkMutex.Lock() + defer fake.getSelfLinkMutex.Unlock() + fake.GetSelfLinkStub = nil + fake.getSelfLinkReturns = struct { + result1 string + }{result1} +} + +func (fake *UpgradeInstance) GetSelfLinkReturnsOnCall(i int, result1 string) { + fake.getSelfLinkMutex.Lock() + defer fake.getSelfLinkMutex.Unlock() + fake.GetSelfLinkStub = nil + if fake.getSelfLinkReturnsOnCall == nil { + fake.getSelfLinkReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getSelfLinkReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *UpgradeInstance) GetUID() types.UID { + fake.getUIDMutex.Lock() + ret, specificReturn := fake.getUIDReturnsOnCall[len(fake.getUIDArgsForCall)] + fake.getUIDArgsForCall = append(fake.getUIDArgsForCall, struct { + }{}) + stub := fake.GetUIDStub + fakeReturns := fake.getUIDReturns + fake.recordInvocation("GetUID", []interface{}{}) + fake.getUIDMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *UpgradeInstance) GetUIDCallCount() int { + fake.getUIDMutex.RLock() + defer fake.getUIDMutex.RUnlock() + return len(fake.getUIDArgsForCall) +} + +func (fake *UpgradeInstance) GetUIDCalls(stub func() types.UID) { + fake.getUIDMutex.Lock() + defer fake.getUIDMutex.Unlock() + fake.GetUIDStub = stub +} + +func (fake *UpgradeInstance) GetUIDReturns(result1 types.UID) { + fake.getUIDMutex.Lock() + defer fake.getUIDMutex.Unlock() + fake.GetUIDStub = nil + fake.getUIDReturns = struct { + result1 types.UID + }{result1} +} + +func (fake *UpgradeInstance) GetUIDReturnsOnCall(i int, result1 types.UID) { + fake.getUIDMutex.Lock() + defer fake.getUIDMutex.Unlock() + fake.GetUIDStub = nil + if fake.getUIDReturnsOnCall == nil { + fake.getUIDReturnsOnCall = make(map[int]struct { + result1 types.UID + }) + } + fake.getUIDReturnsOnCall[i] = struct { + result1 types.UID + }{result1} +} + +func (fake *UpgradeInstance) IsHSMEnabled() bool { + fake.isHSMEnabledMutex.Lock() + ret, specificReturn := fake.isHSMEnabledReturnsOnCall[len(fake.isHSMEnabledArgsForCall)] + fake.isHSMEnabledArgsForCall = append(fake.isHSMEnabledArgsForCall, struct { + }{}) + stub := fake.IsHSMEnabledStub + fakeReturns := fake.isHSMEnabledReturns + fake.recordInvocation("IsHSMEnabled", []interface{}{}) + fake.isHSMEnabledMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *UpgradeInstance) IsHSMEnabledCallCount() int { + fake.isHSMEnabledMutex.RLock() + defer fake.isHSMEnabledMutex.RUnlock() + return len(fake.isHSMEnabledArgsForCall) +} + +func (fake *UpgradeInstance) IsHSMEnabledCalls(stub func() bool) { + fake.isHSMEnabledMutex.Lock() + defer fake.isHSMEnabledMutex.Unlock() + fake.IsHSMEnabledStub = stub +} + +func (fake *UpgradeInstance) IsHSMEnabledReturns(result1 bool) { + fake.isHSMEnabledMutex.Lock() + defer fake.isHSMEnabledMutex.Unlock() + fake.IsHSMEnabledStub = nil + fake.isHSMEnabledReturns = struct { + result1 bool + }{result1} +} + +func (fake *UpgradeInstance) IsHSMEnabledReturnsOnCall(i int, result1 bool) { + 
fake.isHSMEnabledMutex.Lock() + defer fake.isHSMEnabledMutex.Unlock() + fake.IsHSMEnabledStub = nil + if fake.isHSMEnabledReturnsOnCall == nil { + fake.isHSMEnabledReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.isHSMEnabledReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *UpgradeInstance) SetAnnotations(arg1 map[string]string) { + fake.setAnnotationsMutex.Lock() + fake.setAnnotationsArgsForCall = append(fake.setAnnotationsArgsForCall, struct { + arg1 map[string]string + }{arg1}) + stub := fake.SetAnnotationsStub + fake.recordInvocation("SetAnnotations", []interface{}{arg1}) + fake.setAnnotationsMutex.Unlock() + if stub != nil { + fake.SetAnnotationsStub(arg1) + } +} + +func (fake *UpgradeInstance) SetAnnotationsCallCount() int { + fake.setAnnotationsMutex.RLock() + defer fake.setAnnotationsMutex.RUnlock() + return len(fake.setAnnotationsArgsForCall) +} + +func (fake *UpgradeInstance) SetAnnotationsCalls(stub func(map[string]string)) { + fake.setAnnotationsMutex.Lock() + defer fake.setAnnotationsMutex.Unlock() + fake.SetAnnotationsStub = stub +} + +func (fake *UpgradeInstance) SetAnnotationsArgsForCall(i int) map[string]string { + fake.setAnnotationsMutex.RLock() + defer fake.setAnnotationsMutex.RUnlock() + argsForCall := fake.setAnnotationsArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *UpgradeInstance) SetClusterName(arg1 string) { + fake.setClusterNameMutex.Lock() + fake.setClusterNameArgsForCall = append(fake.setClusterNameArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetClusterNameStub + fake.recordInvocation("SetClusterName", []interface{}{arg1}) + fake.setClusterNameMutex.Unlock() + if stub != nil { + fake.SetClusterNameStub(arg1) + } +} + +func (fake *UpgradeInstance) SetClusterNameCallCount() int { + fake.setClusterNameMutex.RLock() + defer fake.setClusterNameMutex.RUnlock() + return len(fake.setClusterNameArgsForCall) +} + +func (fake *UpgradeInstance) SetClusterNameCalls(stub func(string)) { + fake.setClusterNameMutex.Lock() + defer fake.setClusterNameMutex.Unlock() + fake.SetClusterNameStub = stub +} + +func (fake *UpgradeInstance) SetClusterNameArgsForCall(i int) string { + fake.setClusterNameMutex.RLock() + defer fake.setClusterNameMutex.RUnlock() + argsForCall := fake.setClusterNameArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *UpgradeInstance) SetCreationTimestamp(arg1 v1.Time) { + fake.setCreationTimestampMutex.Lock() + fake.setCreationTimestampArgsForCall = append(fake.setCreationTimestampArgsForCall, struct { + arg1 v1.Time + }{arg1}) + stub := fake.SetCreationTimestampStub + fake.recordInvocation("SetCreationTimestamp", []interface{}{arg1}) + fake.setCreationTimestampMutex.Unlock() + if stub != nil { + fake.SetCreationTimestampStub(arg1) + } +} + +func (fake *UpgradeInstance) SetCreationTimestampCallCount() int { + fake.setCreationTimestampMutex.RLock() + defer fake.setCreationTimestampMutex.RUnlock() + return len(fake.setCreationTimestampArgsForCall) +} + +func (fake *UpgradeInstance) SetCreationTimestampCalls(stub func(v1.Time)) { + fake.setCreationTimestampMutex.Lock() + defer fake.setCreationTimestampMutex.Unlock() + fake.SetCreationTimestampStub = stub +} + +func (fake *UpgradeInstance) SetCreationTimestampArgsForCall(i int) v1.Time { + fake.setCreationTimestampMutex.RLock() + defer fake.setCreationTimestampMutex.RUnlock() + argsForCall := fake.setCreationTimestampArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *UpgradeInstance) SetDeletionGracePeriodSeconds(arg1 *int64) { 
+ fake.setDeletionGracePeriodSecondsMutex.Lock() + fake.setDeletionGracePeriodSecondsArgsForCall = append(fake.setDeletionGracePeriodSecondsArgsForCall, struct { + arg1 *int64 + }{arg1}) + stub := fake.SetDeletionGracePeriodSecondsStub + fake.recordInvocation("SetDeletionGracePeriodSeconds", []interface{}{arg1}) + fake.setDeletionGracePeriodSecondsMutex.Unlock() + if stub != nil { + fake.SetDeletionGracePeriodSecondsStub(arg1) + } +} + +func (fake *UpgradeInstance) SetDeletionGracePeriodSecondsCallCount() int { + fake.setDeletionGracePeriodSecondsMutex.RLock() + defer fake.setDeletionGracePeriodSecondsMutex.RUnlock() + return len(fake.setDeletionGracePeriodSecondsArgsForCall) +} + +func (fake *UpgradeInstance) SetDeletionGracePeriodSecondsCalls(stub func(*int64)) { + fake.setDeletionGracePeriodSecondsMutex.Lock() + defer fake.setDeletionGracePeriodSecondsMutex.Unlock() + fake.SetDeletionGracePeriodSecondsStub = stub +} + +func (fake *UpgradeInstance) SetDeletionGracePeriodSecondsArgsForCall(i int) *int64 { + fake.setDeletionGracePeriodSecondsMutex.RLock() + defer fake.setDeletionGracePeriodSecondsMutex.RUnlock() + argsForCall := fake.setDeletionGracePeriodSecondsArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *UpgradeInstance) SetDeletionTimestamp(arg1 *v1.Time) { + fake.setDeletionTimestampMutex.Lock() + fake.setDeletionTimestampArgsForCall = append(fake.setDeletionTimestampArgsForCall, struct { + arg1 *v1.Time + }{arg1}) + stub := fake.SetDeletionTimestampStub + fake.recordInvocation("SetDeletionTimestamp", []interface{}{arg1}) + fake.setDeletionTimestampMutex.Unlock() + if stub != nil { + fake.SetDeletionTimestampStub(arg1) + } +} + +func (fake *UpgradeInstance) SetDeletionTimestampCallCount() int { + fake.setDeletionTimestampMutex.RLock() + defer fake.setDeletionTimestampMutex.RUnlock() + return len(fake.setDeletionTimestampArgsForCall) +} + +func (fake *UpgradeInstance) SetDeletionTimestampCalls(stub func(*v1.Time)) { + fake.setDeletionTimestampMutex.Lock() + defer fake.setDeletionTimestampMutex.Unlock() + fake.SetDeletionTimestampStub = stub +} + +func (fake *UpgradeInstance) SetDeletionTimestampArgsForCall(i int) *v1.Time { + fake.setDeletionTimestampMutex.RLock() + defer fake.setDeletionTimestampMutex.RUnlock() + argsForCall := fake.setDeletionTimestampArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *UpgradeInstance) SetFinalizers(arg1 []string) { + var arg1Copy []string + if arg1 != nil { + arg1Copy = make([]string, len(arg1)) + copy(arg1Copy, arg1) + } + fake.setFinalizersMutex.Lock() + fake.setFinalizersArgsForCall = append(fake.setFinalizersArgsForCall, struct { + arg1 []string + }{arg1Copy}) + stub := fake.SetFinalizersStub + fake.recordInvocation("SetFinalizers", []interface{}{arg1Copy}) + fake.setFinalizersMutex.Unlock() + if stub != nil { + fake.SetFinalizersStub(arg1) + } +} + +func (fake *UpgradeInstance) SetFinalizersCallCount() int { + fake.setFinalizersMutex.RLock() + defer fake.setFinalizersMutex.RUnlock() + return len(fake.setFinalizersArgsForCall) +} + +func (fake *UpgradeInstance) SetFinalizersCalls(stub func([]string)) { + fake.setFinalizersMutex.Lock() + defer fake.setFinalizersMutex.Unlock() + fake.SetFinalizersStub = stub +} + +func (fake *UpgradeInstance) SetFinalizersArgsForCall(i int) []string { + fake.setFinalizersMutex.RLock() + defer fake.setFinalizersMutex.RUnlock() + argsForCall := fake.setFinalizersArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *UpgradeInstance) SetGenerateName(arg1 string) { + 
fake.setGenerateNameMutex.Lock() + fake.setGenerateNameArgsForCall = append(fake.setGenerateNameArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetGenerateNameStub + fake.recordInvocation("SetGenerateName", []interface{}{arg1}) + fake.setGenerateNameMutex.Unlock() + if stub != nil { + fake.SetGenerateNameStub(arg1) + } +} + +func (fake *UpgradeInstance) SetGenerateNameCallCount() int { + fake.setGenerateNameMutex.RLock() + defer fake.setGenerateNameMutex.RUnlock() + return len(fake.setGenerateNameArgsForCall) +} + +func (fake *UpgradeInstance) SetGenerateNameCalls(stub func(string)) { + fake.setGenerateNameMutex.Lock() + defer fake.setGenerateNameMutex.Unlock() + fake.SetGenerateNameStub = stub +} + +func (fake *UpgradeInstance) SetGenerateNameArgsForCall(i int) string { + fake.setGenerateNameMutex.RLock() + defer fake.setGenerateNameMutex.RUnlock() + argsForCall := fake.setGenerateNameArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *UpgradeInstance) SetGeneration(arg1 int64) { + fake.setGenerationMutex.Lock() + fake.setGenerationArgsForCall = append(fake.setGenerationArgsForCall, struct { + arg1 int64 + }{arg1}) + stub := fake.SetGenerationStub + fake.recordInvocation("SetGeneration", []interface{}{arg1}) + fake.setGenerationMutex.Unlock() + if stub != nil { + fake.SetGenerationStub(arg1) + } +} + +func (fake *UpgradeInstance) SetGenerationCallCount() int { + fake.setGenerationMutex.RLock() + defer fake.setGenerationMutex.RUnlock() + return len(fake.setGenerationArgsForCall) +} + +func (fake *UpgradeInstance) SetGenerationCalls(stub func(int64)) { + fake.setGenerationMutex.Lock() + defer fake.setGenerationMutex.Unlock() + fake.SetGenerationStub = stub +} + +func (fake *UpgradeInstance) SetGenerationArgsForCall(i int) int64 { + fake.setGenerationMutex.RLock() + defer fake.setGenerationMutex.RUnlock() + argsForCall := fake.setGenerationArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *UpgradeInstance) SetLabels(arg1 map[string]string) { + fake.setLabelsMutex.Lock() + fake.setLabelsArgsForCall = append(fake.setLabelsArgsForCall, struct { + arg1 map[string]string + }{arg1}) + stub := fake.SetLabelsStub + fake.recordInvocation("SetLabels", []interface{}{arg1}) + fake.setLabelsMutex.Unlock() + if stub != nil { + fake.SetLabelsStub(arg1) + } +} + +func (fake *UpgradeInstance) SetLabelsCallCount() int { + fake.setLabelsMutex.RLock() + defer fake.setLabelsMutex.RUnlock() + return len(fake.setLabelsArgsForCall) +} + +func (fake *UpgradeInstance) SetLabelsCalls(stub func(map[string]string)) { + fake.setLabelsMutex.Lock() + defer fake.setLabelsMutex.Unlock() + fake.SetLabelsStub = stub +} + +func (fake *UpgradeInstance) SetLabelsArgsForCall(i int) map[string]string { + fake.setLabelsMutex.RLock() + defer fake.setLabelsMutex.RUnlock() + argsForCall := fake.setLabelsArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *UpgradeInstance) SetManagedFields(arg1 []v1.ManagedFieldsEntry) { + var arg1Copy []v1.ManagedFieldsEntry + if arg1 != nil { + arg1Copy = make([]v1.ManagedFieldsEntry, len(arg1)) + copy(arg1Copy, arg1) + } + fake.setManagedFieldsMutex.Lock() + fake.setManagedFieldsArgsForCall = append(fake.setManagedFieldsArgsForCall, struct { + arg1 []v1.ManagedFieldsEntry + }{arg1Copy}) + stub := fake.SetManagedFieldsStub + fake.recordInvocation("SetManagedFields", []interface{}{arg1Copy}) + fake.setManagedFieldsMutex.Unlock() + if stub != nil { + fake.SetManagedFieldsStub(arg1) + } +} + +func (fake *UpgradeInstance) SetManagedFieldsCallCount() int { + 
fake.setManagedFieldsMutex.RLock() + defer fake.setManagedFieldsMutex.RUnlock() + return len(fake.setManagedFieldsArgsForCall) +} + +func (fake *UpgradeInstance) SetManagedFieldsCalls(stub func([]v1.ManagedFieldsEntry)) { + fake.setManagedFieldsMutex.Lock() + defer fake.setManagedFieldsMutex.Unlock() + fake.SetManagedFieldsStub = stub +} + +func (fake *UpgradeInstance) SetManagedFieldsArgsForCall(i int) []v1.ManagedFieldsEntry { + fake.setManagedFieldsMutex.RLock() + defer fake.setManagedFieldsMutex.RUnlock() + argsForCall := fake.setManagedFieldsArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *UpgradeInstance) SetName(arg1 string) { + fake.setNameMutex.Lock() + fake.setNameArgsForCall = append(fake.setNameArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetNameStub + fake.recordInvocation("SetName", []interface{}{arg1}) + fake.setNameMutex.Unlock() + if stub != nil { + fake.SetNameStub(arg1) + } +} + +func (fake *UpgradeInstance) SetNameCallCount() int { + fake.setNameMutex.RLock() + defer fake.setNameMutex.RUnlock() + return len(fake.setNameArgsForCall) +} + +func (fake *UpgradeInstance) SetNameCalls(stub func(string)) { + fake.setNameMutex.Lock() + defer fake.setNameMutex.Unlock() + fake.SetNameStub = stub +} + +func (fake *UpgradeInstance) SetNameArgsForCall(i int) string { + fake.setNameMutex.RLock() + defer fake.setNameMutex.RUnlock() + argsForCall := fake.setNameArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *UpgradeInstance) SetNamespace(arg1 string) { + fake.setNamespaceMutex.Lock() + fake.setNamespaceArgsForCall = append(fake.setNamespaceArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetNamespaceStub + fake.recordInvocation("SetNamespace", []interface{}{arg1}) + fake.setNamespaceMutex.Unlock() + if stub != nil { + fake.SetNamespaceStub(arg1) + } +} + +func (fake *UpgradeInstance) SetNamespaceCallCount() int { + fake.setNamespaceMutex.RLock() + defer fake.setNamespaceMutex.RUnlock() + return len(fake.setNamespaceArgsForCall) +} + +func (fake *UpgradeInstance) SetNamespaceCalls(stub func(string)) { + fake.setNamespaceMutex.Lock() + defer fake.setNamespaceMutex.Unlock() + fake.SetNamespaceStub = stub +} + +func (fake *UpgradeInstance) SetNamespaceArgsForCall(i int) string { + fake.setNamespaceMutex.RLock() + defer fake.setNamespaceMutex.RUnlock() + argsForCall := fake.setNamespaceArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *UpgradeInstance) SetOwnerReferences(arg1 []v1.OwnerReference) { + var arg1Copy []v1.OwnerReference + if arg1 != nil { + arg1Copy = make([]v1.OwnerReference, len(arg1)) + copy(arg1Copy, arg1) + } + fake.setOwnerReferencesMutex.Lock() + fake.setOwnerReferencesArgsForCall = append(fake.setOwnerReferencesArgsForCall, struct { + arg1 []v1.OwnerReference + }{arg1Copy}) + stub := fake.SetOwnerReferencesStub + fake.recordInvocation("SetOwnerReferences", []interface{}{arg1Copy}) + fake.setOwnerReferencesMutex.Unlock() + if stub != nil { + fake.SetOwnerReferencesStub(arg1) + } +} + +func (fake *UpgradeInstance) SetOwnerReferencesCallCount() int { + fake.setOwnerReferencesMutex.RLock() + defer fake.setOwnerReferencesMutex.RUnlock() + return len(fake.setOwnerReferencesArgsForCall) +} + +func (fake *UpgradeInstance) SetOwnerReferencesCalls(stub func([]v1.OwnerReference)) { + fake.setOwnerReferencesMutex.Lock() + defer fake.setOwnerReferencesMutex.Unlock() + fake.SetOwnerReferencesStub = stub +} + +func (fake *UpgradeInstance) SetOwnerReferencesArgsForCall(i int) []v1.OwnerReference { + 
fake.setOwnerReferencesMutex.RLock() + defer fake.setOwnerReferencesMutex.RUnlock() + argsForCall := fake.setOwnerReferencesArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *UpgradeInstance) SetResourceVersion(arg1 string) { + fake.setResourceVersionMutex.Lock() + fake.setResourceVersionArgsForCall = append(fake.setResourceVersionArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetResourceVersionStub + fake.recordInvocation("SetResourceVersion", []interface{}{arg1}) + fake.setResourceVersionMutex.Unlock() + if stub != nil { + fake.SetResourceVersionStub(arg1) + } +} + +func (fake *UpgradeInstance) SetResourceVersionCallCount() int { + fake.setResourceVersionMutex.RLock() + defer fake.setResourceVersionMutex.RUnlock() + return len(fake.setResourceVersionArgsForCall) +} + +func (fake *UpgradeInstance) SetResourceVersionCalls(stub func(string)) { + fake.setResourceVersionMutex.Lock() + defer fake.setResourceVersionMutex.Unlock() + fake.SetResourceVersionStub = stub +} + +func (fake *UpgradeInstance) SetResourceVersionArgsForCall(i int) string { + fake.setResourceVersionMutex.RLock() + defer fake.setResourceVersionMutex.RUnlock() + argsForCall := fake.setResourceVersionArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *UpgradeInstance) SetSelfLink(arg1 string) { + fake.setSelfLinkMutex.Lock() + fake.setSelfLinkArgsForCall = append(fake.setSelfLinkArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetSelfLinkStub + fake.recordInvocation("SetSelfLink", []interface{}{arg1}) + fake.setSelfLinkMutex.Unlock() + if stub != nil { + fake.SetSelfLinkStub(arg1) + } +} + +func (fake *UpgradeInstance) SetSelfLinkCallCount() int { + fake.setSelfLinkMutex.RLock() + defer fake.setSelfLinkMutex.RUnlock() + return len(fake.setSelfLinkArgsForCall) +} + +func (fake *UpgradeInstance) SetSelfLinkCalls(stub func(string)) { + fake.setSelfLinkMutex.Lock() + defer fake.setSelfLinkMutex.Unlock() + fake.SetSelfLinkStub = stub +} + +func (fake *UpgradeInstance) SetSelfLinkArgsForCall(i int) string { + fake.setSelfLinkMutex.RLock() + defer fake.setSelfLinkMutex.RUnlock() + argsForCall := fake.setSelfLinkArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *UpgradeInstance) SetUID(arg1 types.UID) { + fake.setUIDMutex.Lock() + fake.setUIDArgsForCall = append(fake.setUIDArgsForCall, struct { + arg1 types.UID + }{arg1}) + stub := fake.SetUIDStub + fake.recordInvocation("SetUID", []interface{}{arg1}) + fake.setUIDMutex.Unlock() + if stub != nil { + fake.SetUIDStub(arg1) + } +} + +func (fake *UpgradeInstance) SetUIDCallCount() int { + fake.setUIDMutex.RLock() + defer fake.setUIDMutex.RUnlock() + return len(fake.setUIDArgsForCall) +} + +func (fake *UpgradeInstance) SetUIDCalls(stub func(types.UID)) { + fake.setUIDMutex.Lock() + defer fake.setUIDMutex.Unlock() + fake.SetUIDStub = stub +} + +func (fake *UpgradeInstance) SetUIDArgsForCall(i int) types.UID { + fake.setUIDMutex.RLock() + defer fake.setUIDMutex.RUnlock() + argsForCall := fake.setUIDArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *UpgradeInstance) UsingCouchDB() bool { + fake.usingCouchDBMutex.Lock() + ret, specificReturn := fake.usingCouchDBReturnsOnCall[len(fake.usingCouchDBArgsForCall)] + fake.usingCouchDBArgsForCall = append(fake.usingCouchDBArgsForCall, struct { + }{}) + stub := fake.UsingCouchDBStub + fakeReturns := fake.usingCouchDBReturns + fake.recordInvocation("UsingCouchDB", []interface{}{}) + fake.usingCouchDBMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return 
ret.result1 + } + return fakeReturns.result1 +} + +func (fake *UpgradeInstance) UsingCouchDBCallCount() int { + fake.usingCouchDBMutex.RLock() + defer fake.usingCouchDBMutex.RUnlock() + return len(fake.usingCouchDBArgsForCall) +} + +func (fake *UpgradeInstance) UsingCouchDBCalls(stub func() bool) { + fake.usingCouchDBMutex.Lock() + defer fake.usingCouchDBMutex.Unlock() + fake.UsingCouchDBStub = stub +} + +func (fake *UpgradeInstance) UsingCouchDBReturns(result1 bool) { + fake.usingCouchDBMutex.Lock() + defer fake.usingCouchDBMutex.Unlock() + fake.UsingCouchDBStub = nil + fake.usingCouchDBReturns = struct { + result1 bool + }{result1} +} + +func (fake *UpgradeInstance) UsingCouchDBReturnsOnCall(i int, result1 bool) { + fake.usingCouchDBMutex.Lock() + defer fake.usingCouchDBMutex.Unlock() + fake.UsingCouchDBStub = nil + if fake.usingCouchDBReturnsOnCall == nil { + fake.usingCouchDBReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.usingCouchDBReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *UpgradeInstance) UsingHSMProxy() bool { + fake.usingHSMProxyMutex.Lock() + ret, specificReturn := fake.usingHSMProxyReturnsOnCall[len(fake.usingHSMProxyArgsForCall)] + fake.usingHSMProxyArgsForCall = append(fake.usingHSMProxyArgsForCall, struct { + }{}) + stub := fake.UsingHSMProxyStub + fakeReturns := fake.usingHSMProxyReturns + fake.recordInvocation("UsingHSMProxy", []interface{}{}) + fake.usingHSMProxyMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *UpgradeInstance) UsingHSMProxyCallCount() int { + fake.usingHSMProxyMutex.RLock() + defer fake.usingHSMProxyMutex.RUnlock() + return len(fake.usingHSMProxyArgsForCall) +} + +func (fake *UpgradeInstance) UsingHSMProxyCalls(stub func() bool) { + fake.usingHSMProxyMutex.Lock() + defer fake.usingHSMProxyMutex.Unlock() + fake.UsingHSMProxyStub = stub +} + +func (fake *UpgradeInstance) UsingHSMProxyReturns(result1 bool) { + fake.usingHSMProxyMutex.Lock() + defer fake.usingHSMProxyMutex.Unlock() + fake.UsingHSMProxyStub = nil + fake.usingHSMProxyReturns = struct { + result1 bool + }{result1} +} + +func (fake *UpgradeInstance) UsingHSMProxyReturnsOnCall(i int, result1 bool) { + fake.usingHSMProxyMutex.Lock() + defer fake.usingHSMProxyMutex.Unlock() + fake.UsingHSMProxyStub = nil + if fake.usingHSMProxyReturnsOnCall == nil { + fake.usingHSMProxyReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.usingHSMProxyReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *UpgradeInstance) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.deepCopyObjectMutex.RLock() + defer fake.deepCopyObjectMutex.RUnlock() + fake.getAnnotationsMutex.RLock() + defer fake.getAnnotationsMutex.RUnlock() + fake.getClusterNameMutex.RLock() + defer fake.getClusterNameMutex.RUnlock() + fake.getCreationTimestampMutex.RLock() + defer fake.getCreationTimestampMutex.RUnlock() + fake.getDeletionGracePeriodSecondsMutex.RLock() + defer fake.getDeletionGracePeriodSecondsMutex.RUnlock() + fake.getDeletionTimestampMutex.RLock() + defer fake.getDeletionTimestampMutex.RUnlock() + fake.getFinalizersMutex.RLock() + defer fake.getFinalizersMutex.RUnlock() + fake.getGenerateNameMutex.RLock() + defer fake.getGenerateNameMutex.RUnlock() + fake.getGenerationMutex.RLock() + defer fake.getGenerationMutex.RUnlock() + fake.getLabelsMutex.RLock() + defer 
fake.getLabelsMutex.RUnlock() + fake.getManagedFieldsMutex.RLock() + defer fake.getManagedFieldsMutex.RUnlock() + fake.getNameMutex.RLock() + defer fake.getNameMutex.RUnlock() + fake.getNamespaceMutex.RLock() + defer fake.getNamespaceMutex.RUnlock() + fake.getObjectKindMutex.RLock() + defer fake.getObjectKindMutex.RUnlock() + fake.getOwnerReferencesMutex.RLock() + defer fake.getOwnerReferencesMutex.RUnlock() + fake.getResourceVersionMutex.RLock() + defer fake.getResourceVersionMutex.RUnlock() + fake.getSelfLinkMutex.RLock() + defer fake.getSelfLinkMutex.RUnlock() + fake.getUIDMutex.RLock() + defer fake.getUIDMutex.RUnlock() + fake.isHSMEnabledMutex.RLock() + defer fake.isHSMEnabledMutex.RUnlock() + fake.setAnnotationsMutex.RLock() + defer fake.setAnnotationsMutex.RUnlock() + fake.setClusterNameMutex.RLock() + defer fake.setClusterNameMutex.RUnlock() + fake.setCreationTimestampMutex.RLock() + defer fake.setCreationTimestampMutex.RUnlock() + fake.setDeletionGracePeriodSecondsMutex.RLock() + defer fake.setDeletionGracePeriodSecondsMutex.RUnlock() + fake.setDeletionTimestampMutex.RLock() + defer fake.setDeletionTimestampMutex.RUnlock() + fake.setFinalizersMutex.RLock() + defer fake.setFinalizersMutex.RUnlock() + fake.setGenerateNameMutex.RLock() + defer fake.setGenerateNameMutex.RUnlock() + fake.setGenerationMutex.RLock() + defer fake.setGenerationMutex.RUnlock() + fake.setLabelsMutex.RLock() + defer fake.setLabelsMutex.RUnlock() + fake.setManagedFieldsMutex.RLock() + defer fake.setManagedFieldsMutex.RUnlock() + fake.setNameMutex.RLock() + defer fake.setNameMutex.RUnlock() + fake.setNamespaceMutex.RLock() + defer fake.setNamespaceMutex.RUnlock() + fake.setOwnerReferencesMutex.RLock() + defer fake.setOwnerReferencesMutex.RUnlock() + fake.setResourceVersionMutex.RLock() + defer fake.setResourceVersionMutex.RUnlock() + fake.setSelfLinkMutex.RLock() + defer fake.setSelfLinkMutex.RUnlock() + fake.setUIDMutex.RLock() + defer fake.setUIDMutex.RUnlock() + fake.usingCouchDBMutex.RLock() + defer fake.usingCouchDBMutex.RUnlock() + fake.usingHSMProxyMutex.RLock() + defer fake.usingHSMProxyMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *UpgradeInstance) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ action.UpgradeInstance = new(UpgradeInstance) diff --git a/pkg/action/upgradedbs.go b/pkg/action/upgradedbs.go new file mode 100644 index 00000000..7fbdb070 --- /dev/null +++ b/pkg/action/upgradedbs.go @@ -0,0 +1,528 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package action
+
+import (
+	"context"
+	"fmt"
+	"path/filepath"
+	"time"
+
+	"github.com/pkg/errors"
+
+	current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1"
+	oconfig "github.com/IBM-Blockchain/fabric-operator/operatorconfig"
+	"github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config"
+	"github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient"
+	controller "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient"
+	"github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/container"
+	"github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/deployment"
+	jobv1 "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/job"
+	"github.com/IBM-Blockchain/fabric-operator/pkg/util"
+	"github.com/IBM-Blockchain/fabric-operator/pkg/util/image"
+
+	appsv1 "k8s.io/api/apps/v1"
+	batchv1 "k8s.io/api/batch/v1"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/wait"
+
+	k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+//go:generate counterfeiter -o mocks/deploymentreset.go -fake-name DeploymentReset . DeploymentReset
+
+// DeploymentReset defines the contract to manage the deployment resource
+type DeploymentReset interface {
+	Get(v1.Object) (k8sclient.Object, error)
+	DeploymentStatus(v1.Object) (appsv1.DeploymentStatus, error)
+	GetScheme() *runtime.Scheme
+}
+
+//go:generate counterfeiter -o mocks/upgradeinstance.go -fake-name UpgradeInstance . UpgradeInstance
+
+// UpgradeInstance defines the contract to update the instance database
+type UpgradeInstance interface {
+	runtime.Object
+	v1.Object
+	UsingCouchDB() bool
+	UsingHSMProxy() bool
+	IsHSMEnabled() bool
+}
+
+// UpgradeDBs will update the database and perform all necessary cleanup and restart logic
+func UpgradeDBs(deploymentManager DeploymentReset, client controller.Client, instance UpgradeInstance, timeouts oconfig.DBMigrationTimeouts) error {
+	obj, err := deploymentManager.Get(instance)
+	if err != nil {
+		return errors.Wrap(err, "failed to get deployment")
+	}
+
+	dep := deployment.New(obj.(*appsv1.Deployment))
+	originalReplicas := dep.Spec.Replicas
+
+	// Need to set replicas to 0, otherwise the migration job won't be able to start due to
+	// the volume being attached to another node.
+	//
+	// Wait for deployment to get marked as unavailable after replicas are updated to 0
+	if err := setReplicaCountAndWait(client, deploymentManager, instance, int32(0), timeouts.ReplicaChange.Get()); err != nil {
+		return errors.Wrapf(err, "failed to update deployment for '%s'", instance.GetName())
+	}
+
+	if err := waitForPodToDelete(client, instance, timeouts.PodDeletion.Get()); err != nil {
+		return err
+	}
+
+	var ip string
+	if instance.UsingCouchDB() {
+		couchDBPod := getCouchDBPod(dep)
+		if err := startCouchDBPod(client, couchDBPod); err != nil {
+			return err
+		}
+
+		ip, err = waitForPodToBeRunning(client, couchDBPod, timeouts.PodStart.Get())
+		if err != nil {
+			return errors.Wrap(err, "couchdb pod failed to start")
+		}
+	}
+
+	var hsmConfig *config.HSMConfig
+	if !instance.UsingHSMProxy() && instance.IsHSMEnabled() {
+		hsmConfig, err = config.ReadHSMConfig(client, instance)
+		if err != nil {
+			return err
+		}
+	}
+
+	job := peerDBMigrationJob(dep, instance.(*current.IBPPeer), hsmConfig, ip, timeouts)
+	createOpt := controllerclient.CreateOption{
+		Owner:  instance,
+		Scheme: deploymentManager.GetScheme(),
+	}
+	if err := StartJob(client, job.Job, createOpt); err != nil {
+		if instance.UsingCouchDB() {
+			log.Info("failed to start db migration job, deleting couchdb pod")
+			couchDBPod := &corev1.Pod{
+				ObjectMeta: v1.ObjectMeta{
+					Name:      fmt.Sprintf("%s-couchdb", instance.GetName()),
+					Namespace: instance.GetNamespace(),
+				},
+			}
+
+			if err := client.Delete(context.TODO(), couchDBPod); err != nil {
+				return errors.Wrap(err, "failed to delete couchdb pod")
+			}
+		}
+		return errors.Wrap(err, "failed to start db migration job")
+	}
+	log.Info(fmt.Sprintf("Job '%s' created", job.GetName()))
+
+	// Wait for job to start and pod to go into running state before reverting
+	// back to original replica value
+	if err := job.WaitUntilActive(client); err != nil {
+		return err
+	}
+	log.Info(fmt.Sprintf("Job '%s' active", job.GetName()))
+
+	if err := job.WaitUntilContainerFinished(client, "dbmigration"); err != nil {
+		return err
+	}
+	log.Info(fmt.Sprintf("Job '%s' finished", job.GetName()))
+
+	// Wait for deployment to get marked as available after replica update
+	if err := setReplicaCountAndWait(client, deploymentManager, instance, *originalReplicas, timeouts.ReplicaChange.Get()); err != nil {
+		return errors.Wrapf(err, "failed to update deployment for '%s'", instance.GetName())
+	}
+
+	return nil
+}
+
+// StartJob uses the client to create a job on the kubernetes cluster
+func StartJob(client controller.Client, job *batchv1.Job, opt controller.CreateOption) error {
+	log.Info(fmt.Sprintf("Starting job '%s'", job.GetName()))
+
+	if err := client.Create(context.TODO(), job, opt); err != nil {
+		return errors.Wrap(err, "failed to create migration job")
+	}
+
+	return nil
+}
+
+func startCouchDBPod(client controller.Client, pod *corev1.Pod) error {
+	log.Info(fmt.Sprintf("Starting couchdb pod '%s'", pod.GetName()))
+
+	if err := client.Create(context.TODO(), pod); err != nil {
+		return errors.Wrap(err, "failed to create couchdb pod")
+	}
+
+	return nil
+}
+
+func getCouchDBPod(dep *deployment.Deployment) *corev1.Pod {
+	couchdb := dep.MustGetContainer("couchdb")
+
+	localSpecCopy := dep.Spec.Template.Spec.DeepCopy()
+	volumes := localSpecCopy.Volumes
+	// Remove ledgerdb volume from couchdb pod
+	for i, volume := range volumes {
+		if volume.Name == "fabric-peer-0" {
+			// Remove the ledgerdb data from couchdb container
+			volumes[i] = volumes[len(volumes)-1]
+			volumes = volumes[:len(volumes)-1]
+			break
+		}
+ } + return &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-couchdb", dep.GetName()), + Namespace: dep.GetNamespace(), + Labels: map[string]string{ + "app": dep.Name, + }, + }, + Spec: corev1.PodSpec{ + ImagePullSecrets: dep.Spec.Template.Spec.ImagePullSecrets, + RestartPolicy: corev1.RestartPolicyNever, + Containers: []corev1.Container{ + *couchdb.Container, + }, + Volumes: volumes, + }, + } +} + +func waitForPodToDelete(client controller.Client, instance metav1.Object, timeout time.Duration) error { + err := wait.Poll(2*time.Second, timeout, func() (bool, error) { + log.Info(fmt.Sprintf("Waiting for pod for deployment '%s' to be deleted", instance.GetName())) + + labelSelector, err := labels.Parse(fmt.Sprintf("app=%s", instance.GetName())) + if err != nil { + return false, nil + } + + opts := &k8sclient.ListOptions{ + LabelSelector: labelSelector, + } + + pods := &corev1.PodList{} + if err := client.List(context.TODO(), pods, opts); err != nil { + return false, nil + } + + if len(pods.Items) == 0 { + return true, nil + } + + return false, nil + }) + if err != nil { + return errors.Wrapf(err, "failed to delete pod associated with '%s'", instance.GetName()) + } + return nil +} + +func waitForPodToBeRunning(client controller.Client, pod *corev1.Pod, timeout time.Duration) (string, error) { + var podIP string + p := &corev1.Pod{} + + err := wait.Poll(2*time.Second, timeout, func() (bool, error) { + log.Info(fmt.Sprintf("Waiting for couchdb pod '%s' to be running", pod.GetName())) + + label := fmt.Sprintf("app=%s", pod.Labels["app"]) + labelSelector, err := labels.Parse(label) + if err != nil { + return false, err + } + + opts := &k8sclient.ListOptions{ + LabelSelector: labelSelector, + } + + pods := &corev1.PodList{} + if err := client.List(context.TODO(), pods, opts); err != nil { + return false, err + } + + if len(pods.Items) != 1 { + return false, nil + } + + p = &pods.Items[0] + if len(p.Status.ContainerStatuses) > 0 && p.Status.ContainerStatuses[0].State.Running != nil { + if p.Status.ContainerStatuses[0].Ready { + return true, nil + } + } + + return false, nil + }) + if err != nil { + return podIP, errors.Wrapf(err, "pod '%s' not running", pod.GetName()) + } + + if p != nil { + podIP = p.Status.PodIP + } + + return podIP, nil +} + +func setReplicaCountAndWait(client controller.Client, deploymentManager DeploymentReset, instance metav1.Object, count int32, timeout time.Duration) error { + obj, err := deploymentManager.Get(instance) + if err != nil { + return errors.Wrap(err, "failed to get deployment") + } + dep := deployment.New(obj.DeepCopyObject().(*appsv1.Deployment)) + + if err := setReplicaCountOnDeployment(client, obj, dep, count); err != nil { + return err + } + + err = wait.Poll(2*time.Second, timeout, func() (bool, error) { + log.Info(fmt.Sprintf("Waiting for deployment '%s' replicas to go to %d", dep.GetName(), count)) + status, err := deploymentManager.DeploymentStatus(instance) + if err == nil { + if status.Replicas == count { + return true, nil + } + } + return false, nil + }) + if err != nil { + return errors.Wrap(err, "failed to determine if deployment is available") + } + + return nil +} + +func setReplicaCountOnDeployment(client controller.Client, obj k8sclient.Object, dep *deployment.Deployment, count int32) error { + dep.Deployment.Spec.Replicas = &count + if err := client.Patch(context.TODO(), dep.Deployment, k8sclient.MergeFrom(obj)); err != nil { + return errors.Wrapf(err, "failed to update replica to %d", count) + } + return nil +} + +// 
Copy of container that is passed but updated with new command +func peerDBMigrationJob(dep *deployment.Deployment, instance *current.IBPPeer, hsmConfig *config.HSMConfig, couchdbIP string, timeouts oconfig.DBMigrationTimeouts) *jobv1.Job { + cont := dep.MustGetContainer("peer") + envs := []string{ + "LICENSE", + "FABRIC_CFG_PATH", + "CORE_PEER_MSPCONFIGPATH", + "CORE_PEER_FILESYSTEMPATH", + "CORE_PEER_TLS_ENABLED", + "CORE_PEER_TLS_CERT_FILE", + "CORE_PEER_TLS_KEY_FILE", + "CORE_PEER_TLS_ROOTCERT_FILE", + "CORE_PEER_LOCALMSPID", + "CORE_LEDGER_STATE_COUCHDBCONFIG_USERNAME", + "CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD", + "CORE_LEDGER_STATE_STATEDATABASE", + } + + backoffLimit := int32(0) + envVars := cont.GetEnvs(envs) + envVars = append(envVars, + corev1.EnvVar{ + Name: "FABRIC_LOGGING_SPEC", + Value: "debug", + }, + ) + + if couchdbIP != "" { + envVars = append(envVars, + corev1.EnvVar{ + Name: "CORE_LEDGER_STATE_COUCHDBCONFIG_COUCHDBADDRESS", + Value: fmt.Sprintf("%s:5984", couchdbIP), + }, + ) + } + + command := `echo "Migrating peer's database" && peer node upgrade-dbs && mkdir -p /data/status && ts=$(date +%Y%m%d-%H%M%S) && touch /data/status/migrated_to_v2-$ts` + + if instance.UsingHSMProxy() { + envVars = append(envVars, + corev1.EnvVar{ + Name: "PKCS11_PROXY_SOCKET", + Value: instance.Spec.HSM.PKCS11Endpoint, + }, + ) + } + + localSpecCopy := dep.Spec.Template.Spec.DeepCopy() + volumes := localSpecCopy.Volumes + + if instance.UsingCouchDB() { + // Remove statedb volume from migration pod + for i, volume := range volumes { + if volume.Name == "db-data" { + // Remove the statedb data from couchdb container + volumes[i] = volumes[len(volumes)-1] + volumes = volumes[:len(volumes)-1] + break + } + } + } + + k8sJob := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-dbmigration", instance.GetName()), + Namespace: dep.GetNamespace(), + Labels: map[string]string{ + "job-name": fmt.Sprintf("%s-dbmigration", instance.GetName()), + "owner": instance.GetName(), + }, + }, + Spec: batchv1.JobSpec{ + BackoffLimit: &backoffLimit, + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + ImagePullSecrets: dep.Spec.Template.Spec.ImagePullSecrets, + RestartPolicy: corev1.RestartPolicyNever, + Containers: []corev1.Container{ + { + Name: "dbmigration", + Image: image.Format(instance.Spec.Images.PeerImage, instance.Spec.Images.PeerTag), + ImagePullPolicy: cont.ImagePullPolicy, + Command: []string{ + "sh", + "-c", + command, + }, + Env: envVars, + Resources: cont.Resources, + SecurityContext: cont.SecurityContext, + VolumeMounts: cont.VolumeMounts, + }, + }, + Volumes: volumes, + }, + }, + }, + } + + job := jobv1.New(k8sJob, &jobv1.Timeouts{ + WaitUntilActive: timeouts.JobStart.Get(), + WaitUntilFinished: timeouts.JobCompletion.Get(), + }) + + if hsmConfig != nil { + migrationCont := job.MustGetContainer("dbmigration") + migrationCont.Env = append(migrationCont.Env, hsmConfig.Envs...) 
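+		// The rest of this block only applies when an HSM is configured without the
+		// PKCS#11 proxy: the job gets an in-memory "shared" volume, an init container
+		// copies the HSM client library into it, and, when a daemon is configured,
+		// the daemon container is added alongside the migration container.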
+
+	volume := corev1.Volume{
+		Name: "shared",
+		VolumeSource: corev1.VolumeSource{
+			EmptyDir: &corev1.EmptyDirVolumeSource{
+				Medium: corev1.StorageMediumMemory,
+			},
+		},
+	}
+	job.Spec.Template.Spec.Volumes = util.AppendVolumeIfMissing(job.Spec.Template.Spec.Volumes, volume)
+
+	initCont := HSMInitContainer(instance, hsmConfig)
+	job.Spec.Template.Spec.InitContainers = append(job.Spec.Template.Spec.InitContainers, *initCont.Container)
+
+	if hsmConfig.Daemon != nil {
+		// Unable to launch daemon if not running in privileged mode
+		t := true
+		migrationCont.SecurityContext.Privileged = &t
+		migrationCont.SecurityContext.AllowPrivilegeEscalation = &t
+
+		// This is the shared volume where the file 'pkcsslotd-launched' is touched to let
+		// other containers know that the daemon has successfully launched.
+		migrationCont.AppendVolumeMountIfMissing("shared", "/shared")
+
+		// Update the command to ensure that the daemon is running before starting the migration
+		migrationCont.Command = []string{
+			"sh",
+			"-c",
+			fmt.Sprintf("%s && %s", config.DAEMON_CHECK_CMD, command),
+		}
+
+		var pvcMount *corev1.VolumeMount
+		for _, vm := range hsmConfig.MountPaths {
+			if vm.UsePVC {
+				pvcMount = &corev1.VolumeMount{
+					Name:      "fabric-peer-0",
+					MountPath: vm.MountPath,
+				}
+			}
+		}
+
+		// Add daemon container to the job
+		config.AddDaemonContainer(hsmConfig, job, instance.GetResource(current.HSMDAEMON), pvcMount)
+
+		// If a pvc mount has been configured in HSM config, set the volume mount on the migration container
+		if pvcMount != nil {
+			migrationCont.AppendVolumeMountIfMissing(pvcMount.Name, pvcMount.MountPath)
+		}
+	}
+
+	return job
+}
+
+// HSMInitContainer creates a container that copies the HSM library to the shared volume
+func HSMInitContainer(instance *current.IBPPeer, hsmConfig *config.HSMConfig) *container.Container {
+	hsmLibraryPath := hsmConfig.Library.FilePath
+	hsmLibraryName := filepath.Base(hsmLibraryPath)
+
+	f := false
+	user := int64(0)
+	mountPath := "/shared"
+	initCont := &container.Container{
+		Container: &corev1.Container{
+			Name:            "hsm-client",
+			Image:           image.Format(instance.Spec.Images.HSMImage, instance.Spec.Images.HSMTag),
+			ImagePullPolicy: corev1.PullAlways,
+			Command: []string{
+				"sh",
+				"-c",
+				fmt.Sprintf("mkdir -p %s/hsm && dst=\"%s/hsm/%s\" && echo \"Copying %s to ${dst}\" && mkdir -p $(dirname $dst) && cp -r %s $dst", mountPath, mountPath, hsmLibraryName, hsmLibraryPath, hsmLibraryPath),
+			},
+			SecurityContext: &corev1.SecurityContext{
+				RunAsUser:    &user,
+				RunAsNonRoot: &f,
+			},
+			VolumeMounts: []corev1.VolumeMount{
+				{
+					Name:      "shared",
+					MountPath: mountPath,
+				},
+			},
+			Resources: corev1.ResourceRequirements{
+				Requests: corev1.ResourceList{
+					corev1.ResourceCPU:    resource.MustParse("0.1"),
+					corev1.ResourceMemory: resource.MustParse("100Mi"),
+				},
+				Limits: corev1.ResourceList{
+					corev1.ResourceCPU:    resource.MustParse("1"),
+					corev1.ResourceMemory: resource.MustParse("500Mi"),
+				},
+			},
+		},
+	}
+
+	return initCont
+}
diff --git a/pkg/action/upgradedbs_test.go b/pkg/action/upgradedbs_test.go
new file mode 100644
index 00000000..90f86b17
--- /dev/null
+++ b/pkg/action/upgradedbs_test.go
@@ -0,0 +1,173 @@
+/*
+ * Copyright contributors to the Hyperledger Fabric Operator project
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package action_test + +import ( + "context" + "errors" + "strings" + + controllermocks "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + k8sclient "sigs.k8s.io/controller-runtime/pkg/client" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/action" + "github.com/IBM-Blockchain/fabric-operator/pkg/action/mocks" + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" +) + +var _ = Describe("actions", func() { + var ( + depMgr *mocks.DeploymentReset + client *controllermocks.Client + instance *current.IBPPeer + ) + + BeforeEach(func() { + depMgr = &mocks.DeploymentReset{} + instance = ¤t.IBPPeer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "peer", + }, + Spec: current.IBPPeerSpec{ + Images: ¤t.PeerImages{ + PeerImage: "peerimage", + PeerTag: "peertag", + }, + }, + } + + replicas := int32(1) + dep := &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Replicas: &replicas, + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + v1.Container{ + Name: "peer", + }, + }, + }, + }, + }, + } + depMgr.GetReturnsOnCall(0, dep, nil) + depMgr.GetReturnsOnCall(1, &appsv1.Deployment{}, nil) + depMgr.GetReturnsOnCall(2, &appsv1.Deployment{}, nil) + depMgr.GetSchemeReturns(&runtime.Scheme{}) + + status := appsv1.DeploymentStatus{ + Replicas: int32(0), + } + depMgr.DeploymentStatusReturnsOnCall(0, status, nil) + + status.Replicas = 1 + depMgr.DeploymentStatusReturnsOnCall(1, status, nil) + + client = &controllermocks.Client{ + GetStub: func(ctx context.Context, types types.NamespacedName, obj k8sclient.Object) error { + switch obj.(type) { + case *batchv1.Job: + job := obj.(*batchv1.Job) + job.Status.Active = int32(1) + } + return nil + }, + ListStub: func(ctx context.Context, obj k8sclient.ObjectList, opts ...k8sclient.ListOption) error { + switch obj.(type) { + case *corev1.PodList: + pods := obj.(*corev1.PodList) + if strings.Contains(opts[0].(*k8sclient.ListOptions).LabelSelector.String(), "job-name") { + pods.Items = []corev1.Pod{ + { + Status: corev1.PodStatus{ + ContainerStatuses: []corev1.ContainerStatus{ + { + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{}, + // Running: &corev1.ContainerStateRunning{}, + }, + }, + }, + }, + }, + } + } + } + return nil + }, + } + }) + + Context("peer upgrade dbs", func() { + It("returns error if failed to set replica to zero", func() { + client.PatchReturnsOnCall(0, errors.New("update error")) + err := action.UpgradeDBs(depMgr, client, instance, config.DBMigrationTimeouts{}) + Expect(err).To(HaveOccurred()) + Expect(err).To(MatchError(ContainSubstring("update error"))) + }) + + It("returns error if failed to set replica to original 
value", func() { + client.PatchReturnsOnCall(1, errors.New("update error")) + err := action.UpgradeDBs(depMgr, client, instance, config.DBMigrationTimeouts{ + JobStart: common.MustParseDuration("1s"), + JobCompletion: common.MustParseDuration("1s"), + }) + Expect(err).To(HaveOccurred()) + Expect(err).To(MatchError(ContainSubstring("update error"))) + }) + + It("returns error if failed start job", func() { + client.CreateReturns(errors.New("job create error")) + err := action.UpgradeDBs(depMgr, client, instance, config.DBMigrationTimeouts{}) + Expect(err).To(HaveOccurred()) + Expect(err).To(MatchError(ContainSubstring("job create error"))) + }) + + It("upgrade dbs", func() { + err := action.UpgradeDBs(depMgr, client, instance, config.DBMigrationTimeouts{}) + Expect(err).NotTo(HaveOccurred()) + + By("starting job", func() { + Expect(client.CreateCallCount()).To(Equal(1)) + }) + + By("updating deployments to update replicas", func() { + _, dep, _, _ := client.PatchArgsForCall(0) + Expect(*dep.(*appsv1.Deployment).Spec.Replicas).To(Equal(int32(0))) + + _, dep, _, _ = client.PatchArgsForCall(1) + Expect(*dep.(*appsv1.Deployment).Spec.Replicas).To(Equal(int32(1))) + + Expect(client.PatchCallCount()).To(Equal(2)) + }) + }) + }) +}) diff --git a/pkg/apis/ca/v1/ca.go b/pkg/apis/ca/v1/ca.go new file mode 100644 index 00000000..1e70fc6a --- /dev/null +++ b/pkg/apis/ca/v1/ca.go @@ -0,0 +1,374 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package v1
+
+import (
+	commonapi "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common"
+)
+
+// ServerConfig is the fabric-ca server's config
+type ServerConfig struct {
+	// CAConfig is the default CA's config
+	CAConfig `json:",inline"`
+	// Listening port for the server
+	Port int `json:"port,omitempty"`
+	// Bind address for the server
+	Address string `json:"address,omitempty"`
+	// Cross-Origin Resource Sharing settings for the server
+	CORS CORS `json:"cors,omitempty"`
+	// Enables debug logging
+	Debug *bool `json:"debug,omitempty"`
+	// Sets the logging level on the server
+	LogLevel string `json:"loglevel,omitempty"`
+	// TLS for the server's listening endpoint
+	TLS ServerTLSConfig `json:"tls,omitempty"`
+	// The names of the CA configuration files
+	// This is empty unless there are non-default CAs served by this server
+	CAfiles []string `json:"cafiles,omitempty"`
+	// The number of non-default CAs, which is useful for a dev environment to
+	// quickly start any number of CAs in a single server
+	CAcount int `json:"cacount,omitempty"`
+	// Size limit of an acceptable CRL in bytes
+	CRLSizeLimit int `json:"crlsizelimit,omitempty"`
+	// CompMode1_3 determines whether to run in compatibility mode for version 1.3
+	CompMode1_3 *bool `json:"compmode1_3,omitempty"`
+	// Metrics contains the configuration for provider and statsd
+	Metrics MetricsOptions `json:"metrics,omitempty"`
+	// Operations contains the configuration for the operations server
+	Operations Options `json:"operations,omitempty"`
+}
+
+type LDAP struct {
+	Enabled *bool `json:"enabled,omitempty"`
+	URL string `json:"url,omitempty"`
+	UserFilter string `json:"userFilter,omitempty"`
+	GroupFilter string `json:"groupFilter,omitempty"`
+	Attribute AttrConfig `json:"attribute,omitempty"`
+	TLS ClientTLSConfig `json:"tls,omitempty"`
+}
+
+// AttrConfig is attribute configuration information
+type AttrConfig struct {
+	Names []string `json:"names,omitempty"`
+	Converters []NameVal `json:"converters,omitempty"`
+	Maps map[string][]NameVal `json:"maps,omitempty"`
+}
+
+type NameVal struct {
+	Name string `json:"name,omitempty"`
+	Value string `json:"value,omitempty"`
+}
+
+type CAConfig struct {
+	Version string `json:"version,omitempty"`
+	Cfg CfgOptions `json:"cfg,omitempty"`
+	CA CAInfo `json:"ca,omitempty"`
+	Signing Signing `json:"signing,omitempty"`
+	CSR CSRInfo `json:"csr,omitempty"`
+	Registry CAConfigRegistry `json:"registry,omitempty"`
+	Affiliations map[string]interface{} `json:"affiliations,omitempty"`
+	LDAP LDAP `json:"ldap,omitempty"`
+	DB *CAConfigDB `json:"db,omitempty"`
+	CSP *BCCSP `json:"bccsp,omitempty"`
+	Intermediate IntermediateCA `json:"intermediate,omitempty"`
+	CRL CRLConfig `json:"crl,omitempty"`
+
+	// Optional client config for an intermediate server which acts as a client
+	// of the root (or parent) server
+	// Client *ClientConfig `json:"client"`
+}
+
+// CSRInfo is Certificate Signing Request (CSR) Information
+type CSRInfo struct {
+	CN string `json:"cn"`
+	Names []Name `json:"names,omitempty"`
+	Hosts []string `json:"hosts,omitempty"`
+	KeyRequest *KeyRequest `json:"key,omitempty"`
+	CA *CSRCAConfig `json:"ca,omitempty"`
+	SerialNumber string `json:"serial_number,omitempty"`
+}
+
+type CSRCAConfig struct {
+	PathLength int `json:"pathlen"`
+	PathLenZero *bool `json:"pathlenzero"`
+	Expiry string `json:"expiry"`
+	Backdate string `json:"backdate"`
+}
+
+// A Name contains the SubjectInfo fields.
+type Name struct { + C string `json:"C,omitempty"` + ST string `json:"ST,omitempty"` + L string `json:"L,omitempty"` + O string `json:"O,omitempty"` + OU string `json:"OU,omitempty"` + SerialNumber string `json:"SerialNumber,omitempty"` +} + +// KeyRequest encapsulates size and algorithm for the key to be generated +type KeyRequest struct { + Algo string `json:"algo"` + Size int `json:"size"` +} + +type CORS struct { + Enabled *bool `json:"enabled"` + Origins []string `json:"origins"` +} + +type BCCSP struct { + ProviderName string `json:"default,omitempty"` + SW *SwOpts `json:"sw,omitempty"` + PKCS11 *PKCS11Opts `json:"pkcs11,omitempty"` +} + +// SwOpts contains options for the SWFactory +type SwOpts struct { + SecLevel int `json:"security,omitempty"` + HashFamily string `json:"hash,omitempty"` + FileKeyStore FileKeyStoreOpts `json:"filekeystore,omitempty"` +} + +type PKCS11Opts struct { + SecLevel int `json:"security,omitempty"` + HashFamily string `json:"hash,omitempty"` + Library string `json:"library,omitempty"` + Label string `json:"label,omitempty"` + Pin string `json:"pin,omitempty"` + Ephemeral *bool `json:"tempkeys,omitempty"` + SoftVerify *bool `json:"softwareVerify,omitempty"` + Immutable *bool `json:"immutable,omitempty"` + FileKeyStore FileKeyStoreOpts `json:"filekeystore,omitempty"` +} + +type FileKeyStoreOpts struct { + KeyStorePath string `json:"keystore,omitempty"` +} + +// Signing codifies the signature configuration policy for a CA. +type Signing struct { + Profiles map[string]*SigningProfile `json:"profiles"` + Default *SigningProfile `json:"default"` +} + +// A SigningProfile stores information that the CA needs to store +// signature policy. +type SigningProfile struct { + Usage []string `json:"usage,omitempty"` + IssuerURL []string `json:"issuerurl,omitempty"` + OCSP string `json:"ocsp,omitempty"` + CRL string `json:"crl,omitempty"` + CAConstraint CAConstraint `json:"caconstraint,omitempty"` + OCSPNoCheck *bool `json:"ocspnocheck,omitempty"` + ExpiryString string `json:"expirystring,omitempty"` + BackdateString string `json:"backdatestring,omitempty"` + AuthKeyName string `json:"authkeyname,omitempty"` + RemoteName string `json:"remotename,omitempty"` + NameWhitelistString string `json:"namewhiteliststring,omitempty"` + AuthRemote AuthRemote `json:"authremote,omitempty"` + CTLogServers []string `json:"ctlogservers,omitempty"` + CertStore string `json:"certstore,omitempty"` + Expiry commonapi.Duration `json:"expiry,omitempty"` + + // TODO: Do these need to be overridable? + // AllowedExtensions []cfconfig.OID `json:"allowedextensions,omitempty"` + // Policies []CertificatePolicy + // Backdate time.Duration + // Provider auth.Provider + // RemoteProvider auth.Provider + // RemoteServer string + // RemoteCAs *x509.CertPool + // ClientCert *tls.Certificate + // CSRWhitelist *CSRWhitelist + // NameWhitelist *regexp.Regexp + // ExtensionWhitelist map[string]bool + // ClientProvidesSerialNumbers bool + // NotBefore time.Time `json:"notbefore,omitempty"` + // NotAfter time.Time `json:"notafter,omitempty"` +} + +// CAConstraint specifies various CA constraints on the signed certificate. +// CAConstraint would verify against (and override) the CA +// extensions in the given CSR. +type CAConstraint struct { + IsCA *bool `json:"isca,omitempty"` + MaxPathLen int `json:"maxpathlen,omitempty"` + MaxPathLenZero *bool `json:"maxpathlenzero,omitempty"` +} + +// AuthRemote is an authenticated remote signer. 
+type AuthRemote struct {
+	RemoteName string `json:"remote,omitempty"`
+	AuthKeyName string `json:"authkey,omitempty"`
+}
+
+// CfgOptions is a CA configuration that allows for setting different options
+type CfgOptions struct {
+	Identities IdentitiesOptions `json:"identities,omitempty"`
+	Affiliations AffiliationsOptions `json:"affiliations,omitempty"`
+}
+
+// IdentitiesOptions are options that are related to identities
+type IdentitiesOptions struct {
+	PasswordAttempts int `json:"passwordattempts,omitempty"`
+	AllowRemove *bool `json:"allowremove,omitempty"`
+}
+
+// AffiliationsOptions are options that are related to affiliations
+type AffiliationsOptions struct {
+	AllowRemove *bool `json:"allowremove,omitempty"`
+}
+
+// CAInfo is the CA information on a fabric-ca-server
+type CAInfo struct {
+	Name string `json:"name,omitempty"`
+	Keyfile string `json:"keyfile,omitempty"`
+	Certfile string `json:"certfile,omitempty"`
+	Chainfile string `json:"chainfile,omitempty"`
+	ReenrollIgnoreCertExpiry *bool `json:"reenrollignorecertexpiry,omitempty"`
+}
+
+// CAConfigDB is the database part of the server's config
+type CAConfigDB struct {
+	Type string `json:"type,omitempty"`
+	Datasource string `json:"datasource,omitempty"`
+	TLS ClientTLSConfig `json:"tls,omitempty"`
+}
+
+// CAConfigRegistry is the registry part of the server's config
+type CAConfigRegistry struct {
+	MaxEnrollments int `json:"maxenrollments,omitempty"`
+	Identities []CAConfigIdentity `json:"identities,omitempty"`
+}
+
+// CAConfigIdentity is identity information in the server's config
+type CAConfigIdentity struct {
+	Name string `json:"name,omitempty"`
+	Pass string `json:"pass,omitempty"`
+	Type string `json:"type,omitempty"`
+	Affiliation string `json:"affiliation,omitempty"`
+	MaxEnrollments int `json:"maxenrollments,omitempty"`
+	Attrs map[string]interface{} `json:"attrs,omitempty"`
+}
+
+// ParentServer contains URL for the parent server and the name of the CA inside
+// the server to connect to
+type ParentServer struct {
+	URL string `json:"url,omitempty"`
+	CAName string `json:"caname,omitempty"`
+}
+
+// IntermediateCA contains parent server information, TLS configuration, and
+// enrollment request for an intermediate CA
+type IntermediateCA struct {
+	ParentServer ParentServer `json:"parentserver,omitempty"`
+	TLS ClientTLSConfig `json:"tls,omitempty"`
+	Enrollment EnrollmentRequest `json:"enrollment,omitempty"`
+}
+
+// EnrollmentRequest is a request to enroll an identity
+type EnrollmentRequest struct {
+	// The identity name to enroll
+	Name string `json:"name"`
+	// The secret returned via Register
+	Secret string `json:"secret,omitempty"`
+	// CAName is the name of the CA to connect to
+	CAName string `json:"caname,omitempty"`
+	// AttrReqs are requests for attributes to add to the certificate.
+	// Each attribute is added only if the requestor owns the attribute.
+ AttrReqs []*AttributeRequest `json:"attr_reqs,omitempty"` + // Profile is the name of the signing profile to use in issuing the X509 certificate + Profile string `json:"profile,omitempty"` + // Label is the label to use in HSM operations + Label string `json:"label,omitempty"` + // CSR is Certificate Signing Request info + CSR *CSRInfo `json:"csr,omitempty"` // Skipping this because we pull the CSR from the CSR flags + // The type of the enrollment request: x509 or idemix + // The default is a request for an X509 enrollment certificate + Type string `def:"x509"` +} + +type AttributeRequest struct { + Name string `json:"name"` + Optional *bool `json:"optional,omitempty"` +} + +// ClientTLSConfig defines the key material for a TLS client +type ClientTLSConfig struct { + Enabled *bool `json:"enabled,omitempty"` + CertFiles []string `json:"certfiles,omitempty"` + Client KeyCertFiles `json:"client,omitempty"` +} + +type ServerTLSConfig struct { + Enabled *bool `json:"enabled,omitempty"` + CertFile string `json:"certfile,omitempty"` + KeyFile string `json:"keyfile,omitempty"` + ClientAuth ClientAuth `json:"clientauth,omitempty"` +} + +// ClientAuth defines the key material needed to verify client certificates +type ClientAuth struct { + Type string `json:"type,omitempty"` + CertFiles []string `json:"certfiles,omitempty"` +} + +// KeyCertFiles defines the files need for client on TLS +type KeyCertFiles struct { + KeyFile string `json:"keyfile,omitempty"` + CertFile string `json:"certfile,omitempty"` +} + +// CRLConfig contains configuration options used by the gencrl request handler +type CRLConfig struct { + // Specifies expiration for the CRL generated by the gencrl request + // The number of hours specified by this property is added to the UTC time, resulting time + // is used to set the 'Next Update' date of the CRL + Expiry commonapi.Duration `json:"expiry,omitempty"` +} + +// Options contains configuration for the operations system +type Options struct { + ListenAddress string `json:"listenaddress,omitempty"` + Metrics MetricsOptions `json:"metrics,omitempty"` + TLS TLS `json:"tls,omitempty"` +} + +// MetricsOptions contains the information on providers +type MetricsOptions struct { + Provider string `json:"provider,omitempty"` + Statsd *Statsd `json:"statsd,omitempty"` +} + +// TLS contains the TLS configuration for the operations system serve +type TLS struct { + Enabled *bool `json:"enabled,omitempty"` + CertFile string `json:"certfile,omitempty"` + KeyFile string `json:"keyfile,omitempty"` + ClientCertRequired *bool `json:"clientcerrequired,omitempty"` + ClientCACertFiles []string `json:"clientcacertfiles,omitempty"` +} + +// Statsd contains configuration of statsd +type Statsd struct { + Network string `json:"network,omitempty"` + Address string `json:"address,omitempty"` + WriteInterval commonapi.Duration `json:"writeinterval,omitempty"` + Prefix string `json:"prefix,omitempty"` +} diff --git a/pkg/apis/ca/v1/functions.go b/pkg/apis/ca/v1/functions.go new file mode 100644 index 00000000..677c13e3 --- /dev/null +++ b/pkg/apis/ca/v1/functions.go @@ -0,0 +1,40 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1 + +func (c *ClientTLSConfig) IsEnabled() bool { + if c.Enabled != nil { + return *c.Enabled + } + return false +} + +func (s *ServerTLSConfig) IsEnabled() bool { + if s.Enabled != nil { + return *s.Enabled + } + return false +} + +func (t *TLS) IsEnabled() bool { + if t.Enabled != nil { + return *t.Enabled + } + return false +} diff --git a/pkg/apis/common/common.go b/pkg/apis/common/common.go new file mode 100644 index 00000000..71954401 --- /dev/null +++ b/pkg/apis/common/common.go @@ -0,0 +1,123 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package common + +import ( + "fmt" + "strings" + "time" +) + +type Duration struct { + time.Duration `json:",inline"` +} + +// Decode is custom decoder for `envconfig` library, this +// method is used to handle reading in environment variables +// and converting them into the type that is expected in +// our structs +func (d *Duration) Decode(value string) error { + dur, err := time.ParseDuration(value) + if err != nil { + return err + } + + d.Duration = dur + return nil +} + +// Unmarshal is custom unmarshaler for github.com/kelseyhightower/envconfig +func (d *Duration) Unmarshal(s string) (err error) { + if s == "" { + return + } + + d.Duration, err = time.ParseDuration(strings.Trim(string(s), `"`)) + return +} + +func (d *Duration) UnmarshalJSON(b []byte) (err error) { + if b == nil { + return + } + if string(b) == "null" { + return + } + d.Duration, err = time.ParseDuration(strings.Trim(string(b), `"`)) + return +} + +func (d *Duration) Get() time.Duration { + return d.Duration +} + +func (d Duration) MarshalJSON() (b []byte, err error) { + return []byte(fmt.Sprintf(`"%s"`, d.String())), nil +} + +func ParseDuration(d string) (Duration, error) { + duration, err := time.ParseDuration(strings.Trim(string(d), `"`)) + if err != nil { + return Duration{}, err + } + + return Duration{duration}, nil +} + +func MustParseDuration(d string) Duration { + duration, err := time.ParseDuration(strings.Trim(string(d), `"`)) + if err != nil { + return Duration{} + } + + return Duration{duration} +} + +func ConvertTimeDuration(d time.Duration) Duration { + return Duration{d} +} + +type BCCSP struct { + ProviderName string `json:"default,omitempty"` + SW *SwOpts `json:"SW,omitempty"` + PKCS11 *PKCS11Opts `json:"PKCS11,omitempty"` +} + +// SwOpts contains options for the SWFactory +type SwOpts struct { + SecLevel int `json:"security,omitempty"` + HashFamily string `json:"hash,omitempty"` + 
FileKeyStore FileKeyStoreOpts `json:"filekeystore,omitempty"`
+}
+
+type PKCS11Opts struct {
+	SecLevel int `json:"security,omitempty"`
+	HashFamily string `json:"hash,omitempty"`
+	Library string `json:"library,omitempty"`
+	Label string `json:"label,omitempty"`
+	Pin string `json:"pin,omitempty"`
+	Ephemeral bool `json:"tempkeys,omitempty"`
+	SoftVerify bool `json:"softwareVerify,omitempty"`
+	Immutable bool `json:"immutable,omitempty"`
+	FileKeyStore *FileKeyStoreOpts `json:"filekeystore,omitempty"`
+}
+
+type FileKeyStoreOpts struct {
+	KeyStorePath string `json:"keystore,omitempty"`
+}
diff --git a/pkg/apis/console/v1/console.go b/pkg/apis/console/v1/console.go
new file mode 100644
index 00000000..d218ab3d
--- /dev/null
+++ b/pkg/apis/console/v1/console.go
@@ -0,0 +1,121 @@
+/*
+ * Copyright contributors to the Hyperledger Fabric Operator project
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package v1
+
+type DBCustomNames struct {
+	Components string `json:"DB_COMPONENTS"`
+	Sessions string `json:"DB_SESSIONS"`
+	System string `json:"DB_SYSTEM"`
+}
+
+type FabricCapabilites struct {
+	Application []string `json:"application"`
+	Channel []string `json:"channel"`
+	Orderer []string `json:"orderer"`
+}
+
+type IBMID struct {
+	URL string `json:"url,omitempty"`
+	ClientID string `json:"client_id,omitempty"`
+	ClientSecret string `json:"client_secret,omitempty"`
+}
+
+// IBPConsoleClusterData provides the cluster info to the console
+// +k8s:openapi-gen=true
+// +k8s:deepcopy-gen=true
+type IBPConsoleClusterData struct {
+	// Zones provides the zones available
+	Zones []string `json:"zones,omitempty"`
+
+	// Type provides the type of cluster
+	Type string `json:"type,omitempty"`
+
+	Namespace string `json:"namespace,omitempty"`
+}
+
+// +k8s:deepcopy-gen=true
+type InfraImportOptions struct {
+	Platform string `json:"platform,omitempty"`
+	SupportedCAs []string `json:"supported_cas,omitempty"`
+	SupportedOrderers []string `json:"supported_orderers,omitempty"`
+	SupportedPeers []string `json:"supported_peers,omitempty"`
+}
+
+// +k8s:deepcopy-gen=true
+type FeatureFlags struct {
+	ImportOnlyEnabled *bool `json:"import_only_enabled,omitempty"`
+	ReadOnlyEnabled *bool `json:"read_only_enabled,omitempty"`
+	CreateChannelEnabled bool `json:"create_channel_enabled,omitempty"`
+	RemotePeerConfigEnabled bool `json:"remote_peer_config_enabled,omitempty"`
+	SaasEnabled bool `json:"saas_enabled,omitempty"`
+	TemplatesEnabled bool `json:"templates_enabled,omitempty"`
+	CapabilitiesEnabled bool `json:"capabilities_enabled,omitempty"`
+	HighAvailability bool `json:"high_availability,omitempty"`
+	EnableNodeOU bool `json:"enable_ou_identifier,omitempty"`
+	HSMEnabled bool `json:"hsm_enabled,omitempty"`
+	ScaleRaftNodesEnabled bool `json:"scale_raft_nodes_enabled,omitempty"`
+	InfraImportOptions *InfraImportOptions `json:"infra_import_options,omitempty"`
+	Lifecycle20Enabled bool `json:"lifecycle2_0_enabled,omitempty"`
+	Patch14to20Enabled
bool `json:"patch_1_4to2_x_enabled,omitempty"` + DevMode bool `json:"dev_mode,omitempty"` + MustgatherEnabled bool `json:"mustgather_enabled,omitempty"` +} + +// Added here to avoid the Circular dependency +type CRN struct { + Version string `json:"version,omitempty"` + CName string `json:"c_name,omitempty"` + CType string `json:"c_type,omitempty"` + Servicename string `json:"service_name,omitempty"` + Location string `json:"location,omitempty"` + AccountID string `json:"account_id,omitempty"` + InstanceID string `json:"instance_id,omitempty"` + ResourceType string `json:"resource_type,omitempty"` + ResourceID string `json:"resource_id,omitempty"` +} + +type ConsoleSettingsConfig struct { + Version string `json:"version"` + Email string `json:"initial_admin"` + AuthScheme string `json:"auth_scheme"` + AllowDefaultPassword bool `json:"allow_default_password"` + Configtxlator string `json:"configtxlator"` + DeployerURL string `json:"deployer_url"` + DeployerTimeout int32 `json:"deployer_timeout"` + HSM string `json:"hsm"` + SegmentWriteKey string `json:"segment_write_key"` + DBCustomNames DBCustomNames `json:"db_custom_names"` + EnforceBackendSSL bool `json:"enforce_backend_ssl"` + SystemChannelID string `json:"system_channel_id"` + DynamicTLS bool `json:"dynamic_tls"` + DynamicConfig bool `json:"dynamic_config"` + Zone string `json:"zone"` + Infrastructure string `json:"infrastructure"` + FabricCapabilites FabricCapabilites `json:"fabric_capabilities"` + ClusterData *IBPConsoleClusterData `json:"cluster_data"` + ProxyTLSReqs string `json:"proxy_tls_fabric_reqs"` + ProxyTLSUrl string `json:"proxy_tls_ws_url"` + Featureflags *FeatureFlags `json:"feature_flags"` + IBMID *IBMID `json:"ibmid,omitempty"` + IAMApiKey string `json:"iam_api_key,omitempty"` + CRN *CRN `json:"crn,omitempty"` + CRNString string `json:"crn_string,omitempty"` + ActivityTrackerPath string `json:"activity_tracker_path,omitempty"` + TrustProxy string `json:"trust_proxy,omitempty"` +} diff --git a/pkg/apis/console/v1/zz_generated.deepcopy.go b/pkg/apis/console/v1/zz_generated.deepcopy.go new file mode 100644 index 00000000..0a4d7a89 --- /dev/null +++ b/pkg/apis/console/v1/zz_generated.deepcopy.go @@ -0,0 +1,106 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1 + +import () + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FeatureFlags) DeepCopyInto(out *FeatureFlags) { + *out = *in + if in.ImportOnlyEnabled != nil { + in, out := &in.ImportOnlyEnabled, &out.ImportOnlyEnabled + *out = new(bool) + **out = **in + } + if in.ReadOnlyEnabled != nil { + in, out := &in.ReadOnlyEnabled, &out.ReadOnlyEnabled + *out = new(bool) + **out = **in + } + if in.InfraImportOptions != nil { + in, out := &in.InfraImportOptions, &out.InfraImportOptions + *out = new(InfraImportOptions) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureFlags. +func (in *FeatureFlags) DeepCopy() *FeatureFlags { + if in == nil { + return nil + } + out := new(FeatureFlags) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBPConsoleClusterData) DeepCopyInto(out *IBPConsoleClusterData) { + *out = *in + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBPConsoleClusterData. +func (in *IBPConsoleClusterData) DeepCopy() *IBPConsoleClusterData { + if in == nil { + return nil + } + out := new(IBPConsoleClusterData) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InfraImportOptions) DeepCopyInto(out *InfraImportOptions) { + *out = *in + if in.SupportedCAs != nil { + in, out := &in.SupportedCAs, &out.SupportedCAs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SupportedOrderers != nil { + in, out := &in.SupportedOrderers, &out.SupportedOrderers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SupportedPeers != nil { + in, out := &in.SupportedPeers, &out.SupportedPeers + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfraImportOptions. +func (in *InfraImportOptions) DeepCopy() *InfraImportOptions { + if in == nil { + return nil + } + out := new(InfraImportOptions) + in.DeepCopyInto(out) + return out +} diff --git a/pkg/apis/deployer/deployer.go b/pkg/apis/deployer/deployer.go new file mode 100644 index 00000000..1b4eb7e1 --- /dev/null +++ b/pkg/apis/deployer/deployer.go @@ -0,0 +1,549 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package deployer + +import ( + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/util/image" + corev1 "k8s.io/api/core/v1" +) + +type Config struct { + ClusterType string `json:"clusterType"` + Domain string `json:"domain"` + DashboardURL string `json:"dashboardurl"` + Database Database `json:"db"` + Loglevel string `json:"loglevel"` + Port int `json:"port"` + TLS TLSConfig `json:"tls"` + Auth BasicAuth `json:"auth"` + Namespace string `json:"namespace"` + Defaults *Defaults `json:"defaults"` + Versions *Versions `json:"versions"` + ImagePullSecrets []string `json:"imagePullSecrets"` + ServiceConfig ServiceConfig `json:"serviceConfig"` + CRN *current.CRN `json:"crn"` + Timeouts *Timeouts `json:"timeouts"` + OtherImages *OtherImages `json:"otherImages"` + ServiceAccount string `json:"serviceAccount"` + UseTags *bool `json:"usetags"` +} + +type Versions struct { + CA map[string]VersionCA `json:"ca"` + Peer map[string]VersionPeer `json:"peer"` + Orderer map[string]VersionOrderer `json:"orderer"` +} + +type VersionCA struct { + Default bool `json:"default"` + Version string `json:"version"` + Image CAImages `json:"image,omitempty"` +} + +type VersionOrderer struct { + Default bool `json:"default"` + Version string `json:"version"` + Image OrdererImages `json:"image,omitempty"` +} +type VersionPeer struct { + Default bool `json:"default"` + Version string `json:"version"` + Image PeerImages `json:"image,omitempty"` +} + +// CAImages is the list of images to be used in CA deployment +type CAImages struct { + // CAImage is the name of the CA image + CAImage string `json:"caImage,omitempty"` + + // CATag is the tag of the CA image + CATag string `json:"caTag,omitempty"` + + // CADigest is the digest tag of the CA image + CADigest string `json:"caDigest,omitempty"` + + // CAInitImage is the name of the Init image + CAInitImage string `json:"caInitImage,omitempty"` + + // CAInitTag is the tag of the Init image + CAInitTag string `json:"caInitTag,omitempty"` + + // CAInitDigest is the digest tag of the Init image + CAInitDigest string `json:"caInitDigest,omitempty"` + + // HSMImage is the name of the HSM image + HSMImage string `json:"hsmImage,omitempty"` + + // HSMTag is the tag of the HSM image + HSMTag string `json:"hsmTag,omitempty"` + + // HSMDigest is the tag of the HSM image + HSMDigest string `json:"hsmDigest,omitempty"` + + // EnrollerImage is the name of the init image for crypto generation + EnrollerImage string `json:"enrollerImage,omitempty"` + + // EnrollerTag is the tag of the init image for crypto generation + EnrollerTag string `json:"enrollerTag,omitempty"` + + // EnrollerDigest is the digest tag of the init image for crypto generation + EnrollerDigest string `json:"enrollerDigest,omitempty"` +} + +// PeerImages is the list of images to be used in peer deployment +type PeerImages struct { + // PeerInitImage is the name of the peer init image + PeerInitImage string `json:"peerInitImage,omitempty"` + + // PeerInitTag is the tag of the peer init image + PeerInitTag string `json:"peerInitTag,omitempty"` + + // PeerInitDigest is the digest tag of the peer init image + PeerInitDigest string `json:"peerInitDigest,omitempty"` + + // PeerImage is the name of the peer image + PeerImage string `json:"peerImage,omitempty"` + + // PeerTag is the tag of the peer image + PeerTag string `json:"peerTag,omitempty"` + + // PeerDigest is the digest tag of the peer image + PeerDigest string `json:"peerDigest,omitempty"` + + // 
DindImage is the name of the dind image + DindImage string `json:"dindImage,omitempty"` + + // DindTag is the tag of the dind image + DindTag string `json:"dindTag,omitempty"` + + // DindDigest is the digest tag of the dind image + DindDigest string `json:"dindDigest,omitempty"` + + // GRPCWebImage is the name of the grpc web proxy image + GRPCWebImage string `json:"grpcwebImage,omitempty"` + + // GRPCWebTag is the tag of the grpc web proxy image + GRPCWebTag string `json:"grpcwebTag,omitempty"` + + // GRPCWebDigest is the digest tag of the grpc web proxy image + GRPCWebDigest string `json:"grpcwebDigest,omitempty"` + + // FluentdImage is the name of the fluentd logger image + FluentdImage string `json:"fluentdImage,omitempty"` + + // FluentdTag is the tag of the fluentd logger image + FluentdTag string `json:"fluentdTag,omitempty"` + + // FluentdDigest is the digest tag of the fluentd logger image + FluentdDigest string `json:"fluentdDigest,omitempty"` + + // CouchDBImage is the name of the couchdb image + CouchDBImage string `json:"couchdbImage,omitempty"` + + // CouchDBTag is the tag of the couchdb image + CouchDBTag string `json:"couchdbTag,omitempty"` + + // CouchDBDigest is the digest tag of the couchdb image + CouchDBDigest string `json:"couchdbDigest,omitempty"` + + // CCLauncherImage is the name of the chaincode launcher image + CCLauncherImage string `json:"chaincodeLauncherImage,omitempty"` + + // CCLauncherTag is the tag of the chaincode launcher image + CCLauncherTag string `json:"chaincodeLauncherTag,omitempty"` + + // CCLauncherDigest is the digest tag of the chaincode launcher image + CCLauncherDigest string `json:"chaincodeLauncherDigest,omitempty"` + + // FileTransferImage is the name of the file transfer image + FileTransferImage string `json:"fileTransferImage,omitempty"` + + // FileTransferTag is the tag of the file transfer image + FileTransferTag string `json:"fileTransferTag,omitempty"` + + // FileTransferDigest is the digest tag of the file transfer image + FileTransferDigest string `json:"fileTransferDigest,omitempty"` + + // BuilderImage is the name of the builder image + BuilderImage string `json:"builderImage,omitempty"` + + // BuilderTag is the tag of the builder image + BuilderTag string `json:"builderTag,omitempty"` + + // BuilderDigest is the digest tag of the builder image + BuilderDigest string `json:"builderDigest,omitempty"` + + // GoEnvImage is the name of the goenv image + GoEnvImage string `json:"goEnvImage,omitempty"` + + // GoEnvTag is the tag of the goenv image + GoEnvTag string `json:"goEnvTag,omitempty"` + + // GoEnvDigest is the digest tag of the goenv image + GoEnvDigest string `json:"goEnvDigest,omitempty"` + + // JavaEnvImage is the name of the javaenv image + JavaEnvImage string `json:"javaEnvImage,omitempty"` + + // JavaEnvTag is the tag of the javaenv image + JavaEnvTag string `json:"javaEnvTag,omitempty"` + + // JavaEnvDigest is the digest tag of the javaenv image + JavaEnvDigest string `json:"javaEnvDigest,omitempty"` + + // NodeEnvImage is the name of the nodeenv image + NodeEnvImage string `json:"nodeEnvImage,omitempty"` + + // NodeEnvTag is the tag of the nodeenv image + NodeEnvTag string `json:"nodeEnvTag,omitempty"` + + // NodeEnvDigest is the digest tag of the nodeenv image + NodeEnvDigest string `json:"nodeEnvDigest,omitempty"` + + // HSMImage is the name of the hsm image + HSMImage string `json:"hsmImage,omitempty"` + + // HSMTag is the tag of the hsm image + HSMTag string `json:"hsmTag,omitempty"` + + // HSMDigest is the digest 
tag of the hsm image + HSMDigest string `json:"hsmDigest,omitempty"` + + // EnrollerImage is the name of the init image for crypto generation + EnrollerImage string `json:"enrollerImage,omitempty"` + + // EnrollerTag is the tag of the init image for crypto generation + EnrollerTag string `json:"enrollerTag,omitempty"` + + // EnrollerDigest is the digest tag of the init image for crypto generation + EnrollerDigest string `json:"enrollerDigest,omitempty"` +} + +// OrdererImages is the list of images to be used in orderer deployment +type OrdererImages struct { + // OrdererInitImage is the name of the orderer init image + OrdererInitImage string `json:"ordererInitImage,omitempty"` + + // OrdererInitTag is the tag of the orderer init image + OrdererInitTag string `json:"ordererInitTag,omitempty"` + + // OrdererInitDigest is the digest tag of the orderer init image + OrdererInitDigest string `json:"ordererInitDigest,omitempty"` + + // OrdererImage is the name of the orderer image + OrdererImage string `json:"ordererImage,omitempty"` + + // OrdererTag is the tag of the orderer image + OrdererTag string `json:"ordererTag,omitempty"` + + // OrdererDigest is the digest tag of the orderer image + OrdererDigest string `json:"ordererDigest,omitempty"` + + // GRPCWebImage is the name of the grpc web proxy image + GRPCWebImage string `json:"grpcwebImage,omitempty"` + + // GRPCWebTag is the tag of the grpc web proxy image + GRPCWebTag string `json:"grpcwebTag,omitempty"` + + // GRPCWebDigest is the digest tag of the grpc web proxy image + GRPCWebDigest string `json:"grpcwebDigest,omitempty"` + + // HSMImage is the name of the hsm image + HSMImage string `json:"hsmImage,omitempty"` + + // HSMTag is the tag of the hsm image + HSMTag string `json:"hsmTag,omitempty"` + + // HSMDigest is the digest tag of the hsm image + HSMDigest string `json:"hsmDigest,omitempty"` + + // EnrollerImage is the name of the init image for crypto generation + EnrollerImage string `json:"enrollerImage,omitempty"` + + // EnrollerTag is the tag of the init image for crypto generation + EnrollerTag string `json:"enrollerTag,omitempty"` + + // EnrollerDigest is the digest tag of the init image for crypto generation + EnrollerDigest string `json:"enrollerDigest,omitempty"` +} + +type Defaults struct { + Storage *Storage `json:"storage"` + Resources *Resources `json:"resources"` +} + +type Storage struct { + Peer *current.PeerStorages `json:"peer"` + CA *current.CAStorages `json:"ca"` + Orderer *current.OrdererStorages `json:"orderer"` +} + +type Resources struct { + Peer *current.PeerResources `json:"peer"` + CA *current.CAResources `json:"ca"` + Orderer *current.OrdererResources `json:"orderer"` +} + +type ServiceConfig struct { + Type corev1.ServiceType `json:"type"` +} + +// IndividualDatabase describes the initialization of databases +type IndividualDatabase struct { + Name string `json:"name"` + DesignDocs []string `json:"designdocs"` +} + +// Database is connection details to connect to couchdb database +type Database struct { + ConnectionURL string `json:"connectionurl"` + Components IndividualDatabase `json:"components"` + CreateDB bool `json:"createdb"` +} + +// TLSConfig is to configure the tls server +type TLSConfig struct { + Enabled bool `json:"enabled"` + ListenAddress string `json:"listenaddress"` + CertPath string `json:"certpath"` + KeyPath string `json:"keypath"` +} + +// BasicAuth provides implementation to store basic auth info +type BasicAuth struct { + Username string `json:"username"` + Password string 
`json:"password"` +} + +type Timeouts struct { + Deployment int `json:"componentDeploy"` + APIServer int `json:"apiServer"` +} + +// OtherImages contains other images and tags required to run deployer. +type OtherImages struct { + // MustgatherImage is the name of the mustgather image + MustgatherImage string `json:"mustgatherImage,omitempty"` + + // MustgatherTag is the tag of the mustgatherTag image + MustgatherTag string `json:"mustgatherTag,omitempty"` + + // MustgatherDigest is the tag of the mustgatherDigest image + MustgatherDigest string `json:"mustgatherDigest,omitempty"` +} + +// ConsoleImages is the list of images to be used in console deployment +type ConsoleImages struct { + // ConsoleInitImage is the name of the console init image + ConsoleInitImage string `json:"consoleInitImage,omitempty"` + // ConsoleInitTag is the tag of the console init image + ConsoleInitTag string `json:"consoleInitTag,omitempty"` + // ConsoleInitDigest is the digest of the console init image + ConsoleInitDigest string `json:"consoleInitDigest,omitempty"` + + // ConsoleImage is the name of the console image + ConsoleImage string `json:"consoleImage,omitempty"` + // ConsoleTag is the tag of the console image + ConsoleTag string `json:"consoleTag,omitempty"` + // ConsoleDigest is the digest of the console image + ConsoleDigest string `json:"consoleDigest,omitempty"` + + // ConfigtxlatorImage is the name of the configtxlator image + ConfigtxlatorImage string `json:"configtxlatorImage,omitempty"` + // ConfigtxlatorTag is the tag of the configtxlator image + ConfigtxlatorTag string `json:"configtxlatorTag,omitempty"` + // ConfigtxlatorDigest is the digest of the configtxlator image + ConfigtxlatorDigest string `json:"configtxlatorDigest,omitempty"` + + // DeployerImage is the name of the deployer image + DeployerImage string `json:"deployerImage,omitempty"` + // DeployerTag is the tag of the deployer image + DeployerTag string `json:"deployerTag,omitempty"` + // DeployerDigest is the digest of the deployer image + DeployerDigest string `json:"deployerDigest,omitempty"` + + // CouchDBImage is the name of the couchdb image + CouchDBImage string `json:"couchdbImage,omitempty"` + // CouchDBTag is the tag of the couchdb image + CouchDBTag string `json:"couchdbTag,omitempty"` + // CouchDBDigest is the digest of the couchdb image + CouchDBDigest string `json:"couchdbDigest,omitempty"` + + // MustgatherImage is the name of the mustgather image + MustgatherImage string `json:"mustgatherImage,omitempty"` + // MustgatherTag is the tag of the mustgather image + MustgatherTag string `json:"mustgatherTag,omitempty"` + // MustgatherDigest is the digest of the mustgather image + MustgatherDigest string `json:"mustgatherDigest,omitempty"` +} + +func (v *Versions) Override(requestedVersions *Versions, registryURL string, arch string) { + if requestedVersions == nil { + return + } + + if len(requestedVersions.CA) != 0 { + CAVersions := map[string]VersionCA{} + for key, _ := range requestedVersions.CA { + var caConfig VersionCA + requestedCAVersion := requestedVersions.CA[key] + caConfig.Image.Override(&requestedCAVersion.Image, registryURL, arch) + caConfig.Default = requestedCAVersion.Default + caConfig.Version = requestedCAVersion.Version + CAVersions[key] = caConfig + } + v.CA = CAVersions + } + + if len(requestedVersions.Peer) != 0 { + PeerVersions := map[string]VersionPeer{} + for key, _ := range requestedVersions.Peer { + var peerConfig VersionPeer + requestedPeerVersion := requestedVersions.Peer[key] + 
peerConfig.Image.Override(&requestedPeerVersion.Image, registryURL, arch)
+			peerConfig.Default = requestedPeerVersion.Default
+			peerConfig.Version = requestedPeerVersion.Version
+			PeerVersions[key] = peerConfig
+		}
+		v.Peer = PeerVersions
+	}
+
+	if len(requestedVersions.Orderer) != 0 {
+		OrdererVersions := map[string]VersionOrderer{}
+		for key, _ := range requestedVersions.Orderer {
+			var ordererConfig VersionOrderer
+			requestedOrdererVersion := requestedVersions.Orderer[key]
+			ordererConfig.Image.Override(&requestedOrdererVersion.Image, registryURL, arch)
+			ordererConfig.Default = requestedOrdererVersion.Default
+			ordererConfig.Version = requestedOrdererVersion.Version
+			OrdererVersions[key] = ordererConfig
+		}
+		v.Orderer = OrdererVersions
+	}
+}
+
+// Override will look at requested images and use those to override default image
+// values. Override also formats the image tag to include arch for non-sha based
+// tags.
+func (i *CAImages) Override(requested *CAImages, registryURL string, arch string) {
+	// If requested is nil, we are only interested in properly prepending the registry
+	// URL to the image and overriding default values, so an empty struct is initialized.
+	if requested == nil {
+		requested = &CAImages{}
+	}
+
+	// Images
+	i.CAInitImage = image.GetImage(registryURL, i.CAInitImage, requested.CAInitImage)
+	i.CAImage = image.GetImage(registryURL, i.CAImage, requested.CAImage)
+	i.HSMImage = image.GetImage(registryURL, i.HSMImage, requested.HSMImage)
+	i.EnrollerImage = image.GetImage(registryURL, i.EnrollerImage, requested.EnrollerImage)
+
+	// Tags
+	i.CAInitTag = image.GetTag(arch, i.CAInitTag, requested.CAInitTag)
+	i.CATag = image.GetTag(arch, i.CATag, requested.CATag)
+	i.HSMTag = image.GetTag(arch, i.HSMTag, requested.HSMTag)
+	i.EnrollerTag = image.GetTag(arch, i.EnrollerTag, requested.EnrollerTag)
+
+	// Digests
+	i.CAInitDigest = image.GetTag(arch, i.CAInitDigest, requested.CAInitDigest)
+	i.CADigest = image.GetTag(arch, i.CADigest, requested.CADigest)
+	i.HSMDigest = image.GetTag(arch, i.HSMDigest, requested.HSMDigest)
+	i.EnrollerDigest = image.GetTag(arch, i.EnrollerDigest, requested.EnrollerDigest)
+}
+
+func (i *PeerImages) Override(requested *PeerImages, registryURL string, arch string) {
+	if requested == nil {
+		requested = &PeerImages{}
+	}
+
+	// Images
+	i.PeerInitImage = image.GetImage(registryURL, i.PeerInitImage, requested.PeerInitImage)
+	i.PeerImage = image.GetImage(registryURL, i.PeerImage, requested.PeerImage)
+	i.CouchDBImage = image.GetImage(registryURL, i.CouchDBImage, requested.CouchDBImage)
+	i.DindImage = image.GetImage(registryURL, i.DindImage, requested.DindImage)
+	i.GRPCWebImage = image.GetImage(registryURL, i.GRPCWebImage, requested.GRPCWebImage)
+	i.FluentdImage = image.GetImage(registryURL, i.FluentdImage, requested.FluentdImage)
+	i.CCLauncherImage = image.GetImage(registryURL, i.CCLauncherImage, requested.CCLauncherImage)
+	i.FileTransferImage = image.GetImage(registryURL, i.FileTransferImage, requested.FileTransferImage)
+	i.BuilderImage = image.GetImage(registryURL, i.BuilderImage, requested.BuilderImage)
+	i.GoEnvImage = image.GetImage(registryURL, i.GoEnvImage, requested.GoEnvImage)
+	i.JavaEnvImage = image.GetImage(registryURL, i.JavaEnvImage, requested.JavaEnvImage)
+	i.NodeEnvImage = image.GetImage(registryURL, i.NodeEnvImage, requested.NodeEnvImage)
+	i.HSMImage = image.GetImage(registryURL, i.HSMImage, requested.HSMImage)
+	i.EnrollerImage = image.GetImage(registryURL, i.EnrollerImage, requested.EnrollerImage)
+
+	// Tags
+ i.PeerInitTag = image.GetTag(arch, i.PeerInitTag, requested.PeerInitTag) + i.PeerTag = image.GetTag(arch, i.PeerTag, requested.PeerTag) + i.CouchDBTag = image.GetTag(arch, i.CouchDBTag, requested.CouchDBTag) + i.DindTag = image.GetTag(arch, i.DindTag, requested.DindTag) + i.GRPCWebTag = image.GetTag(arch, i.GRPCWebTag, requested.GRPCWebTag) + i.FluentdTag = image.GetTag(arch, i.FluentdTag, requested.FluentdTag) + i.CCLauncherTag = image.GetTag(arch, i.CCLauncherTag, requested.CCLauncherTag) + i.FileTransferTag = image.GetTag(arch, i.FileTransferTag, requested.FileTransferTag) + i.BuilderTag = image.GetTag(arch, i.BuilderTag, requested.BuilderTag) + i.GoEnvTag = image.GetTag(arch, i.GoEnvTag, requested.GoEnvTag) + i.JavaEnvTag = image.GetTag(arch, i.JavaEnvTag, requested.JavaEnvTag) + i.NodeEnvTag = image.GetTag(arch, i.NodeEnvTag, requested.NodeEnvTag) + i.HSMTag = image.GetTag(arch, i.HSMTag, requested.HSMTag) + i.EnrollerTag = image.GetTag(arch, i.EnrollerTag, requested.EnrollerTag) + + // Digests + i.PeerInitDigest = image.GetTag(arch, i.PeerInitDigest, requested.PeerInitDigest) + i.PeerDigest = image.GetTag(arch, i.PeerDigest, requested.PeerDigest) + i.CouchDBDigest = image.GetTag(arch, i.CouchDBDigest, requested.CouchDBDigest) + i.DindDigest = image.GetTag(arch, i.DindDigest, requested.DindDigest) + i.GRPCWebDigest = image.GetTag(arch, i.GRPCWebDigest, requested.GRPCWebDigest) + i.FluentdDigest = image.GetTag(arch, i.FluentdDigest, requested.FluentdDigest) + i.CCLauncherDigest = image.GetTag(arch, i.CCLauncherDigest, requested.CCLauncherDigest) + i.FileTransferDigest = image.GetTag(arch, i.FileTransferDigest, requested.FileTransferDigest) + i.BuilderDigest = image.GetTag(arch, i.BuilderDigest, requested.BuilderDigest) + i.GoEnvDigest = image.GetTag(arch, i.GoEnvDigest, requested.GoEnvDigest) + i.JavaEnvDigest = image.GetTag(arch, i.JavaEnvDigest, requested.JavaEnvDigest) + i.NodeEnvDigest = image.GetTag(arch, i.NodeEnvDigest, requested.NodeEnvDigest) + i.HSMDigest = image.GetTag(arch, i.HSMDigest, requested.HSMDigest) + i.EnrollerDigest = image.GetTag(arch, i.EnrollerDigest, requested.EnrollerDigest) +} + +func (i *OrdererImages) Override(requested *OrdererImages, registryURL string, arch string) { + if requested == nil { + requested = &OrdererImages{} + } + // Images + i.GRPCWebImage = image.GetImage(registryURL, i.GRPCWebImage, requested.GRPCWebImage) + i.OrdererInitImage = image.GetImage(registryURL, i.OrdererInitImage, requested.OrdererInitImage) + i.OrdererImage = image.GetImage(registryURL, i.OrdererImage, requested.OrdererImage) + i.HSMImage = image.GetImage(registryURL, i.HSMImage, requested.HSMImage) + i.EnrollerImage = image.GetImage(registryURL, i.EnrollerImage, requested.EnrollerImage) + + // Tags + i.GRPCWebTag = image.GetTag(arch, i.GRPCWebTag, requested.GRPCWebTag) + i.OrdererInitTag = image.GetTag(arch, i.OrdererInitTag, requested.OrdererInitTag) + i.OrdererTag = image.GetTag(arch, i.OrdererTag, requested.OrdererTag) + i.HSMTag = image.GetTag(arch, i.HSMTag, requested.HSMTag) + i.EnrollerTag = image.GetTag(arch, i.EnrollerTag, requested.EnrollerTag) + + // Digests + i.GRPCWebDigest = image.GetTag(arch, i.GRPCWebDigest, requested.GRPCWebDigest) + i.OrdererInitDigest = image.GetTag(arch, i.OrdererInitDigest, requested.OrdererInitDigest) + i.OrdererDigest = image.GetTag(arch, i.OrdererDigest, requested.OrdererDigest) + i.HSMDigest = image.GetTag(arch, i.HSMDigest, requested.HSMDigest) + i.EnrollerDigest = image.GetTag(arch, i.EnrollerDigest, requested.EnrollerDigest) +} 
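The Override methods above only decide which requested field wins; the actual merge rules are delegated to image.GetImage and image.GetTag, whose implementations are not part of this patch. As a rough, hedged illustration of the presumed precedence (a requested value wins over the packaged default, the registry URL is prepended to the image name, and per the comment above the arch is only folded into non-sha tags), here is a minimal standalone sketch; the helper names, the trailing slash on the registry URL, and the "-<arch>" suffix format are assumptions for illustration only.

// Sketch only: hypothetical stand-ins for image.GetImage / image.GetTag.
package main

import (
	"fmt"
	"strings"
)

// overrideImage mirrors the assumed image.GetImage contract: prefer the
// requested image when set, then prepend the registry URL.
func overrideImage(registryURL, defaultImage, requestedImage string) string {
	img := defaultImage
	if requestedImage != "" {
		img = requestedImage
	}
	return registryURL + img
}

// overrideTag mirrors the assumed image.GetTag contract: prefer the requested
// tag when set, and fold in the arch only for non-sha (non-digest) values.
func overrideTag(arch, defaultTag, requestedTag string) string {
	tag := defaultTag
	if requestedTag != "" {
		tag = requestedTag
	}
	if strings.HasPrefix(tag, "sha256:") {
		return tag // digests are left untouched
	}
	return tag + "-" + arch // suffix format is an assumption
}

func main() {
	fmt.Println(overrideImage("ghcr.io/example/", "fabric-ca", ""))  // ghcr.io/example/fabric-ca
	fmt.Println(overrideTag("amd64", "1.5.3", ""))                   // 1.5.3-amd64
	fmt.Println(overrideTag("amd64", "1.5.3", "sha256:abc123"))      // sha256:abc123
}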
diff --git a/pkg/apis/orderer/v1/orderer.go b/pkg/apis/orderer/v1/orderer.go new file mode 100644 index 00000000..e01c0e1b --- /dev/null +++ b/pkg/apis/orderer/v1/orderer.go @@ -0,0 +1,188 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1 + +import ( + commonapi "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" +) + +type Orderer struct { + General General `json:"general,omitempty"` + FileLedger FileLedger `json:"fileLedger,omitempty"` + Debug Debug `json:"debug,omitempty"` + Consensus interface{} `json:"consensus,omitempty"` + Operations Operations `json:"operations,omitempty"` + Metrics Metrics `json:"metrics,omitempty"` +} + +// General contains config which should be common among all orderer types. +type General struct { + LedgerType string `json:"ledgerType,omitempty"` + ListenAddress string `json:"listenAddress,omitempty"` + ListenPort uint16 `json:"listenPort,omitempty"` + TLS TLS `json:"tls,omitempty"` + Cluster Cluster `json:"cluster,omitempty"` + Keepalive Keepalive `json:"keepalive,omitempty"` + ConnectionTimeout commonapi.Duration `json:"connectionTimeout,omitempty"` + GenesisMethod string `json:"genesisMethod,omitempty"` + GenesisFile string `json:"genesisFile,omitempty"` // For compatibility only, will be replaced by BootstrapFile + BootstrapFile string `json:"bootstrapFile,omitempty"` + Profile Profile `json:"profile,omitempty"` + LocalMSPDir string `json:"localMspDir,omitempty"` + LocalMSPID string `json:"localMspId,omitempty"` + BCCSP *commonapi.BCCSP `json:"BCCSP,omitempty"` + Authentication Authentication `json:"authentication,omitempty"` +} + +type Cluster struct { + ListenAddress string `json:"listenAddress,omitempty"` + ListenPort uint16 `json:"listenPort,omitempty"` + ServerCertificate string `json:"serverCertificate,omitempty"` + ServerPrivateKey string `json:"serverPrivateKey,omitempty"` + ClientCertificate string `json:"clientCertificate,omitempty"` + ClientPrivateKey string `json:"clientPrivateKey,omitempty"` + RootCAs []string `json:"rootCas,omitempty"` + DialTimeout commonapi.Duration `json:"dialTimeout,omitempty"` + RPCTimeout commonapi.Duration `json:"rpcTimeout,omitempty"` + ReplicationBufferSize int `json:"replicationBufferSize,omitempty"` + ReplicationPullTimeout commonapi.Duration `json:"replicationPullTimeout,omitempty"` + ReplicationRetryTimeout commonapi.Duration `json:"replicationRetryTimeout,omitempty"` + ReplicationBackgroundRefreshInterval commonapi.Duration `json:"replicationBackgroundRefreshInterval,omitempty"` + ReplicationMaxRetries int `json:"replicationMaxRetries,omitempty"` + SendBufferSize int `json:"sendBufferSize,omitempty"` + CertExpirationWarningThreshold commonapi.Duration `json:"certExpirationWarningThreshold,omitempty"` + TLSHandshakeTimeShift commonapi.Duration `json:"tlsHandshakeTimeShift,omitempty"` +} + +// Keepalive contains configuration for gRPC servers. 
+type Keepalive struct { + ServerMinInterval commonapi.Duration `json:"serverMinInterval,omitempty"` + ServerInterval commonapi.Duration `json:"serverInterval,omitempty"` + ServerTimeout commonapi.Duration `json:"serverTimeout,omitempty"` +} + +// TLS contains configuration for TLS connections. +type TLS struct { + Enabled *bool `json:"enabled,omitempty"` + PrivateKey string `json:"privateKey,omitempty"` + Certificate string `json:"certificate,omitempty"` + RootCAs []string `json:"rootCas,omitempty"` + ClientAuthRequired *bool `json:"clientAuthRequired,omitempty"` + ClientRootCAs []string `json:"clientRootCas,omitempty"` +} + +// SASLPlain contains configuration for SASL/PLAIN authentication +type SASLPlain struct { + Enabled *bool `json:"enabled,omitempty"` + User string `json:"user,omitempty"` + Password string `json:"password,omitempty"` +} + +// Authentication contains configuration parameters related to authenticating +// client messages. +type Authentication struct { + TimeWindow commonapi.Duration `json:"timeWindow,omitempty"` + NoExpirationChecks *bool `json:"noExpirationChecks,omitempty"` +} + +// Profile contains configuration for Go pprof profiling. +type Profile struct { + Enabled *bool `json:"enabled,omitempty"` + Address string `json:"address,omitempty"` +} + +// FileLedger contains configuration for the file-based ledger. +type FileLedger struct { + Location string `json:"location,omitempty"` + Prefix string `json:"prefix,omitempty"` +} + +// Retry contains configuration related to retries and timeouts when the +// connection to the Kafka cluster cannot be established, or when Metadata +// requests needs to be repeated (because the cluster is in the middle of a +// leader election). +type Retry struct { + ShortInterval commonapi.Duration `json:"shortInterval,omitempty"` + ShortTotal commonapi.Duration `json:"shortTotal,omitempty"` + LongInterval commonapi.Duration `json:"longInterval,omitempty"` + LongTotal commonapi.Duration `json:"longTotal,omitempty"` + NetworkTimeouts NetworkTimeouts `json:"networkTimeouts,omitempty"` + Metadata Metadata `json:"metadata,omitempty"` + Producer Producer `json:"producer,omitempty"` + Consumer Consumer `json:"consumer,omitempty"` +} + +// NetworkTimeouts contains the socket timeouts for network requests to the +// Kafka cluster. +type NetworkTimeouts struct { + DialTimeout commonapi.Duration `json:"dialTimeout,omitempty"` + ReadTimeout commonapi.Duration `json:"readTimeout,omitempty"` + WriteTimeout commonapi.Duration `json:"writeTimeout,omitempty"` +} + +// Metadata contains configuration for the metadata requests to the Kafka +// cluster. +type Metadata struct { + RetryMax int `json:"retryMax,omitempty"` + RetryBackoff commonapi.Duration `json:"retryBackoff,omitempty"` +} + +// Producer contains configuration for the producer's retries when failing to +// post a message to a Kafka partition. +type Producer struct { + RetryMax int `json:"retryMax,omitempty"` + RetryBackoff commonapi.Duration `json:"retryBackoff,omitempty"` +} + +// Consumer contains configuration for the consumer's retries when failing to +// read from a Kafa partition. +type Consumer struct { + RetryBackoff commonapi.Duration `json:"retryBackoff,omitempty"` +} + +// Topic contains the settings to use when creating Kafka topics +type Topic struct { + ReplicationFactor int16 `json:"replicationFactor,omitempty"` +} + +// Debug contains configuration for the orderer's debug parameters. 
+type Debug struct { + BroadcastTraceDir string `json:"broadcastTraceDir,omitempty"` + DeliverTraceDir string `json:"deliverTraceDir,omitempty"` +} + +// Operations configures the operations endpoint for the orderer. +type Operations struct { + ListenAddress string `json:"listenAddress,omitempty"` + TLS TLS `json:"tls,omitempty"` +} + +// Metrics configures the metrics provider for the orderer. +type Metrics struct { + Provider string `json:"provider,omitempty"` + Statsd Statsd `json:"statsd,omitempty"` +} + +// Statsd provides the configuration required to emit statsd metrics from the orderer. +type Statsd struct { + Network string `json:"network,omitempty"` + Address string `json:"address,omitempty"` + WriteInterval commonapi.Duration `json:"writeInterval,omitempty"` + Prefix string `json:"prefix,omitempty"` +} diff --git a/pkg/apis/orderer/v2/orderer.go b/pkg/apis/orderer/v2/orderer.go new file mode 100644 index 00000000..59ebe244 --- /dev/null +++ b/pkg/apis/orderer/v2/orderer.go @@ -0,0 +1,50 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v2 + +import ( + commonapi "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/orderer/v1" +) + +type Orderer struct { + General General `json:"general,omitempty"` + FileLedger v1.FileLedger `json:"fileLedger,omitempty"` + Debug v1.Debug `json:"debug,omitempty"` + Consensus interface{} `json:"consensus,omitempty"` + Operations v1.Operations `json:"operations,omitempty"` + Metrics v1.Metrics `json:"metrics,omitempty"` +} + +type General struct { + ListenAddress string `json:"listenAddress,omitempty"` + ListenPort uint16 `json:"listenPort,omitempty"` + TLS v1.TLS `json:"tls,omitempty"` + Cluster v1.Cluster `json:"cluster,omitempty"` + Keepalive v1.Keepalive `json:"keepalive,omitempty"` + ConnectionTimeout commonapi.Duration `json:"connectionTimeout,omitempty"` + GenesisFile string `json:"genesisFile,omitempty"` // For compatibility only, will be replaced by BootstrapFile + BootstrapFile string `json:"bootstrapFile,omitempty"` + BootstrapMethod string `json:"bootstrapMethod,omitempty"` + Profile v1.Profile `json:"profile,omitempty"` + LocalMSPDir string `json:"localMspDir,omitempty"` + LocalMSPID string `json:"localMspId,omitempty"` + BCCSP *commonapi.BCCSP `json:"BCCSP,omitempty"` + Authentication v1.Authentication `json:"authentication,omitempty"` +} diff --git a/pkg/apis/orderer/v24/orderer.go b/pkg/apis/orderer/v24/orderer.go new file mode 100644 index 00000000..efdc991b --- /dev/null +++ b/pkg/apis/orderer/v24/orderer.go @@ -0,0 +1,67 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v24 + +import ( + commonapi "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/orderer/v1" +) + +type Orderer struct { + General General `json:"general,omitempty"` + FileLedger FileLedger `json:"fileLedger,omitempty"` + Debug v1.Debug `json:"debug,omitempty"` + Consensus interface{} `json:"consensus,omitempty"` + Operations v1.Operations `json:"operations,omitempty"` + Metrics v1.Metrics `json:"metrics,omitempty"` + Admin Admin `json:"admin,omitempty"` + ChannelParticipation ChannelParticipation `json:"channelParticipation,omitempty"` +} + +type General struct { + ListenAddress string `json:"listenAddress,omitempty"` + ListenPort uint16 `json:"listenPort,omitempty"` + TLS v1.TLS `json:"tls,omitempty"` + Cluster v1.Cluster `json:"cluster,omitempty"` + Keepalive v1.Keepalive `json:"keepalive,omitempty"` + ConnectionTimeout commonapi.Duration `json:"connectionTimeout,omitempty"` + GenesisFile string `json:"genesisFile,omitempty"` // For compatibility only, will be replaced by BootstrapFile + BootstrapFile string `json:"bootstrapFile,omitempty"` + BootstrapMethod string `json:"bootstrapMethod,omitempty"` + Profile v1.Profile `json:"profile,omitempty"` + LocalMSPDir string `json:"localMspDir,omitempty"` + LocalMSPID string `json:"localMspId,omitempty"` + BCCSP *commonapi.BCCSP `json:"BCCSP,omitempty"` + Authentication v1.Authentication `json:"authentication,omitempty"` +} + +// FileLedger contains configuration for the file-based ledger. +type FileLedger struct { + Location string `json:"location,omitempty"` +} + +type Admin struct { + ListenAddress string `json:"listenAddress,omitempty"` + TLs v1.TLS `json:"tls,omitempty"` +} + +type ChannelParticipation struct { + Enabled *bool `json:"enabled,omitempty"` + MaxRequestBodySize uint32 `json:"maxRequestBodySize,omitempty"` +} diff --git a/pkg/apis/peer/v1/peer.go b/pkg/apis/peer/v1/peer.go new file mode 100644 index 00000000..3cacdf43 --- /dev/null +++ b/pkg/apis/peer/v1/peer.go @@ -0,0 +1,357 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package v1 + +import ( + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + "github.com/docker/docker/api/types/container" +) + +type Core struct { + Peer Peer `json:"peer,omitempty"` + Chaincode Chaincode `json:"chaincode,omitempty"` + Operations Operations `json:"operations,omitempty"` + Metrics Metrics `json:"metrics,omitempty"` + VM VM `json:"vm,omitempty"` + Ledger Ledger `json:"ledger,omitempty"` + + // Not Fabric - this is for deployment + MaxNameLength *int `json:"maxnamelength,omitempty"` +} + +type Peer struct { + ID string `json:"id,omitempty"` + NetworkID string `json:"networkId,omitempty"` + ListenAddress string `json:"listenAddress,omitempty"` + ChaincodeListenAddress string `json:"chaincodeListenAddress,omitempty"` + ChaincodeAddress string `json:"chaincodeAddress,omitempty"` + Address string `json:"address,omitempty"` + AddressAutoDetect *bool `json:"addressAutoDetect,omitempty"` + Keepalive KeepAlive `json:"keepalive,omitempty"` + Gossip Gossip `json:"gossip,omitempty"` + TLS TLS `json:"tls,omitempty"` + Authentication Authentication `json:"authentication,omitempty"` + FileSystemPath string `json:"fileSystemPath,omitempty"` + BCCSP *common.BCCSP `json:"BCCSP,omitempty"` + MspConfigPath string `json:"mspConfigPath,omitempty"` + LocalMspId string `json:"localMspId,omitempty"` + Client Client `json:"client,omitempty"` + DeliveryClient DeliveryClient `json:"deliveryclient,omitempty"` + LocalMspType string `json:"localMspType,omitempty"` + Profile Profile `json:"profile,omitempty"` + AdminService AdminService `json:"adminService,omitempty"` + Handlers HandlersConfig `json:"handlers,omitempty"` + ValidatorPoolSize int `json:"validatorPoolSize,omitempty"` + Discovery Discovery `json:"discovery,omitempty"` + Limits Limits `json:"limits,omitempty"` +} + +type PluginMapping map[string]HandlerConfig + +type HandlersConfig struct { + AuthFilters []HandlerConfig `json:"authFilters"` + Decorators []HandlerConfig `json:"decorators"` + Endorsers PluginMapping `json:"endorsers"` + Validators PluginMapping `json:"validators"` +} + +type HandlerConfig struct { + Name string `json:"name"` + Library string `json:"library"` +} + +type KeepAlive struct { + MinInterval common.Duration `json:"minInterval,omitempty"` + Client KeepAliveClient `json:"client,omitempty"` + DeliveryClient KeepAliveClient `json:"deliveryClient,omitempty"` +} + +type KeepAliveClient struct { + Interval common.Duration `json:"interval,omitempty"` + Timeout common.Duration `json:"timeout,omitempty"` +} + +type KeepAliveDeliveryClient struct { + Interval common.Duration `json:"interval,omitempty"` + Timeout common.Duration `json:"timeout,omitempty"` +} + +type Gossip struct { + Bootstrap []string `json:"bootstrap,omitempty"` + UseLeaderElection *bool `json:"useLeaderElection,omitempty"` + OrgLeader *bool `json:"orgLeader,omitempty"` + MembershipTrackerInterval common.Duration `json:"membershipTrackerInterval,omitempty"` + Endpoint string `json:"endpoint,omitempty"` + MaxBlockCountToStore int `json:"maxBlockCountToStore,omitempty"` + MaxPropagationBurstLatency common.Duration `json:"maxPropagationBurstLatency,omitempty"` + MaxPropagationBurstSize int `json:"maxPropagationBurstSize,omitempty"` + PropagateIterations int `json:"propagateIterations,omitempty"` + PropagatePeerNum int `json:"propagatePeerNum,omitempty"` + PullInterval common.Duration `json:"pullInterval,omitempty"` + PullPeerNum int `json:"pullPeerNum,omitempty"` + RequestStateInfoInterval common.Duration 
`json:"requestStateInfoInterval,omitempty"` + PublishStateInfoInterval common.Duration `json:"publishStateInfoInterval,omitempty"` + StateInfoRetentionInterval common.Duration `json:"stateInfoRetentionInterval,omitempty"` + PublishCertPeriod common.Duration `json:"publishCertPeriod,omitempty"` + SkipBlockVerification *bool `json:"skipBlockVerification,omitempty"` + DialTimeout common.Duration `json:"dialTimeout,omitempty"` + ConnTimeout common.Duration `json:"connTimeout,omitempty"` + RecvBuffSize int `json:"recvBuffSize,omitempty"` + SendBuffSize int `json:"sendBuffSize,omitempty"` + DigestWaitTime common.Duration `json:"digestWaitTime,omitempty"` + RequestWaitTime common.Duration `json:"requestWaitTime,omitempty"` + ResponseWaitTime common.Duration `json:"responseWaitTime,omitempty"` + AliveTimeInterval common.Duration `json:"aliveTimeInterval,omitempty"` + AliveExpirationTimeout common.Duration `json:"aliveExpirationTimeout,omitempty"` + ReconnectInterval common.Duration `json:"reconnectInterval,omitempty"` + ExternalEndpoint string `json:"externalEndpoint,omitempty"` + Election Election `json:"election,omitempty"` + PvtData PVTData `json:"pvtData,omitempty"` + State State `json:"state,omitempty"` + MaxConnectionAttempts int `json:"maxConnectionAttempts,omitempty"` + MsgExpirationFactor int `json:"msgExpirationFactor,omitempty"` +} + +type Election struct { + StartupGracePeriod common.Duration `json:"startupGracePeriod,omitempty"` + MembershipSampleInterval common.Duration `json:"membershipSampleInterval,omitempty"` + LeaderElectionDuration common.Duration `json:"leaderElectionDuration,omitempty"` + LeaderAliveThreshold common.Duration `json:"leaderAliveThreshold,omitempty"` +} + +type PVTData struct { + PullRetryThreshold common.Duration `json:"pullRetryThreshold,omitempty"` + TransientstoreMaxBlockRetention int `json:"transientstoreMaxBlockRetention,omitempty"` + PushAckTimeout common.Duration `json:"pushAckTimeout,omitempty"` + BtlPullMargin int `json:"btlPullMargin,omitempty"` + ReconcileBatchSize int `json:"reconcileBatchSize,omitempty"` + ReconcileSleepInterval common.Duration `json:"reconcileSleepInterval,omitempty"` + ReconciliationEnabled *bool `json:"reconciliationEnabled,omitempty"` + SkipPullingInvalidTransactionsDuringCommit *bool `json:"skipPullingInvalidTransactionsDuringCommit,omitempty"` +} + +type State struct { + Enabled *bool `json:"enabled,omitempty"` + CheckInterval common.Duration `json:"checkInterval,omitempty"` + ResponseTimeout common.Duration `json:"responseTimeout,omitempty"` + BatchSize int `json:"batchSize,omitempty"` + BlockBufferSize int `json:"blockBufferSize,omitempty"` + MaxRetries int `json:"maxRetries,omitempty"` +} + +type TLS struct { + Enabled *bool `json:"enabled,omitempty"` + ClientAuthRequired *bool `json:"clientAuthRequired,omitempty"` + Cert Cert `json:"cert,omitempty"` + Key Key `json:"key,omitempty"` + RootCert Cert `json:"rootCert,omitempty"` + ClientRootCAs ClientRootCAs `json:"clientRootCas,omitempty"` + ClientKey Key `json:"clientKey,omitempty"` + ClientCert Cert `json:"clientCert,omitempty"` +} + +type Cert struct { + File string `json:"file,omitempty"` +} + +type Key struct { + File string `json:"file,omitempty"` +} + +type ClientRootCAs struct { + Files []string `json:"files,omitempty"` +} + +type Authentication struct { + Timewindow common.Duration `json:"timewindow,omitempty"` +} + +type Client struct { + ConnTimeout common.Duration `json:"connTimeout,omitempty"` +} + +type AddressOverride struct { + From string `json:"from"` + To 
string `json:"to"` + CACertsFile string `json:"caCertsFile"` +} + +type DeliveryClient struct { + ReconnectTotalTimeThreshold common.Duration `json:"reconnectTotalTimeThreshold,omitempty"` + ConnTimeout common.Duration `json:"connTimeout,omitempty"` + ReConnectBackoffThreshold common.Duration `json:"reConnectBackoffThreshold,omitempty"` + AddressOverrides []AddressOverride `json:"addressOverrides,omitempty"` +} + +type Profile struct { + Enabled *bool `json:"enabled,omitempty"` + ListenAddress string `json:"listenAddress,omitempty"` +} + +type AdminService struct { + ListenAddress string `json:"listenAddress,omitempty"` +} + +type Discovery struct { + Enabled *bool `json:"enabled,omitempty"` + AuthCacheEnabled *bool `json:"authCacheEnabled,omitempty"` + AuthCacheMaxSize int `json:"authCacheMaxSize,omitempty"` + AuthCachePurgeRetentionRatio float64 `json:"authCachePurgeRetentionRatio,omitempty"` + OrgMembersAllowedAccess *bool `json:"orgMembersAllowedAccess,omitempty"` +} + +type Limits struct { + Concurrency Concurrency `json:"concurrency,omitempty"` +} + +type Concurrency struct { + Qscc int `json:"qscc,omitempty"` +} + +// Operations configures the operations endpont for the peer. +type Operations struct { + ListenAddress string `json:"listenAddress,omitempty"` + TLS OperationsTLS `json:"tls,omitempty"` +} + +// TLS contains configuration for TLS connections. +type OperationsTLS struct { + Enabled *bool `json:"enabled,omitempty"` + PrivateKey File `json:"key,omitempty"` + Certificate File `json:"cert,omitempty"` + ClientAuthRequired *bool `json:"clientAuthRequired,omitempty"` + ClientRootCAs Files `json:"clientRootCas,omitempty"` +} + +type File struct { + File string `json:"file,omitempty"` +} + +type Files struct { + Files []string `json:"files,omitempty"` +} + +// Metrics confiures the metrics provider for the peer. +type Metrics struct { + Provider string `json:"provider,omitempty"` + Statsd Statsd `json:"statsd,omitempty"` +} + +// Statsd provides the configuration required to emit statsd metrics from the peer. 
+type Statsd struct { + Network string `json:"network,omitempty"` + Address string `json:"address,omitempty"` + WriteInterval common.Duration `json:"writeInterval,omitempty"` + Prefix string `json:"prefix,omitempty"` +} + +type Chaincode struct { + ID ID `json:"id,omitempty"` + Builder string `json:"builder,omitempty"` + Pull *bool `json:"pull.omitempty"` + Golang Golang `json:"golang,omitempty"` + Java Java `json:"java,omitempty"` + Node Node `json:"node,omitempty"` + StartupTimeout common.Duration `json:"startuptimeout,omitempty"` + ExecuteTimeout common.Duration `json:"executetimeout,omitempty"` + InstallTimeout common.Duration `json:"installTimeout,omitempty"` + Mode string `json:"mode,omitempty"` + KeepAlive common.Duration `json:"keepalive,omitempty"` + System map[string]string `json:"system,omitempty"` + Logging Logging `json:"logging,omitempty"` + SystemPlugins []SystemPlugin `json:"systemPlugins,omitempty"` +} + +type SystemPlugin struct { + Enabled *bool `json:"enabled"` + Name string `json:"name"` + Path string `json:"path"` + InvokableExternal *bool `json:"invokableExternal"` + InvokableCC2CC *bool `json:"invokableCC2CC"` +} + +type ID struct { + Path string `json:"path,omitempty"` + Name string `json:"name,omitempty"` +} + +type Golang struct { + Runtime string `json:"runtime,omitempty"` + DynamicLink *bool `json:"dynamicLink,omitempty"` +} + +type Java struct { + Runtime string `json:"runtime,omitempty"` +} + +type Node struct { + Runtime string `json:"runtime,omitempty"` +} + +type Logging struct { + Level string `json:"level,omitempty"` + Shim string `json:"shim,omitempty"` + Format string `json:"format,omitempty"` +} + +type VM struct { + Endpoint string `json:"endpoint,omitempty"` + Docker VMDocker `json:"docker,omitempty"` +} + +type VMDocker struct { + TLS DockerTLS `json:"tls,omitempty"` + AttachStdout *bool `json:"attachStdout,omitempty"` + HostConfig container.HostConfig `json:"hostConfig,omitempty"` +} + +type DockerTLS struct { + Enabled *bool `json:"enabled,omitempty"` + CA File `json:"ca,omitempty"` + Cert File `json:"cert,omitempty"` + Key File `json:"key,omitempty"` +} + +type Ledger struct { + State LedgerState `json:"state,omitempty"` + History LedgerHistory `json:"history,omitempty"` +} + +type LedgerState struct { + StateDatabase string `json:"stateDatabase,omitempty"` + TotalQueryLimit int `json:"totalQueryLimit,omitempty"` + CouchdbConfig CouchdbConfig `json:"couchDBConfig,omitempty"` +} + +type CouchdbConfig struct { + CouchDBAddress string `json:"couchDBAddress,omitempty"` + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + MaxRetries int `json:"maxRetries,omitempty"` + MaxRetriesOnStartup int `json:"maxRetriesOnStartup,omitempty"` + RequestTimeout common.Duration `json:"requestTimeout,omitempty"` + QueryLimit int `json:"internalQueryLimit,omitempty"` + MaxBatchUpdateSize int `json:"maxBatchUpdateSize,omitempty"` + WarmIndexesAfterNBlocks int `json:"warmIndexesAfterNBlocks,omitempty"` + CreateGlobalChangesDB *bool `json:"createGlobalChangesDB,omitempty"` +} + +type LedgerHistory struct { + EnableHistoryDatabase *bool `json:"enableHistoryDatabase,omitempty"` +} diff --git a/pkg/apis/peer/v2/peer.go b/pkg/apis/peer/v2/peer.go new file mode 100644 index 00000000..5434f062 --- /dev/null +++ b/pkg/apis/peer/v2/peer.go @@ -0,0 +1,222 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the 
"License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v2 + +import ( + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/peer/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" +) + +type Core struct { + Peer Peer `json:"peer,omitempty"` + Chaincode Chaincode `json:"chaincode,omitempty"` + Operations v1.Operations `json:"operations,omitempty"` + Metrics v1.Metrics `json:"metrics,omitempty"` + VM v1.VM `json:"vm,omitempty"` + Ledger Ledger `json:"ledger,omitempty"` + // Not Fabric - this is for deployment + MaxNameLength *int `json:"maxnamelength,omitempty"` +} + +type Peer struct { + ID string `json:"id,omitempty"` + NetworkID string `json:"networkId,omitempty"` + ListenAddress string `json:"listenAddress,omitempty"` + ChaincodeListenAddress string `json:"chaincodeListenAddress,omitempty"` + ChaincodeAddress string `json:"chaincodeAddress,omitempty"` + Address string `json:"address,omitempty"` + AddressAutoDetect *bool `json:"addressAutoDetect,omitempty"` + Gateway Gateway `json:"gateway,omitempty"` + Keepalive KeepAlive `json:"keepalive,omitempty"` + Gossip Gossip `json:"gossip,omitempty"` + TLS v1.TLS `json:"tls,omitempty"` + Authentication v1.Authentication `json:"authentication,omitempty"` + FileSystemPath string `json:"fileSystemPath,omitempty"` + BCCSP *common.BCCSP `json:"BCCSP,omitempty"` + MspConfigPath string `json:"mspConfigPath,omitempty"` + LocalMspId string `json:"localMspId,omitempty"` + Client v1.Client `json:"client,omitempty"` + DeliveryClient v1.DeliveryClient `json:"deliveryclient,omitempty"` + LocalMspType string `json:"localMspType,omitempty"` + Profile v1.Profile `json:"profile,omitempty"` + AdminService v1.AdminService `json:"adminService,omitempty"` + Handlers v1.HandlersConfig `json:"handlers,omitempty"` + ValidatorPoolSize int `json:"validatorPoolSize,omitempty"` + Discovery v1.Discovery `json:"discovery,omitempty"` + Limits Limits `json:"limits,omitempty"` +} + +type Gossip struct { + Bootstrap []string `json:"bootstrap,omitempty"` + UseLeaderElection *bool `json:"useLeaderElection,omitempty"` + OrgLeader *bool `json:"orgLeader,omitempty"` + MembershipTrackerInterval common.Duration `json:"membershipTrackerInterval,omitempty"` + Endpoint string `json:"endpoint,omitempty"` + MaxBlockCountToStore int `json:"maxBlockCountToStore,omitempty"` + MaxPropagationBurstLatency common.Duration `json:"maxPropagationBurstLatency,omitempty"` + MaxPropagationBurstSize int `json:"maxPropagationBurstSize,omitempty"` + PropagateIterations int `json:"propagateIterations,omitempty"` + PropagatePeerNum int `json:"propagatePeerNum,omitempty"` + PullInterval common.Duration `json:"pullInterval,omitempty"` + PullPeerNum int `json:"pullPeerNum,omitempty"` + RequestStateInfoInterval common.Duration `json:"requestStateInfoInterval,omitempty"` + PublishStateInfoInterval common.Duration `json:"publishStateInfoInterval,omitempty"` + StateInfoRetentionInterval common.Duration `json:"stateInfoRetentionInterval,omitempty"` + PublishCertPeriod common.Duration 
`json:"publishCertPeriod,omitempty"` + SkipBlockVerification *bool `json:"skipBlockVerification,omitempty"` + DialTimeout common.Duration `json:"dialTimeout,omitempty"` + ConnTimeout common.Duration `json:"connTimeout,omitempty"` + RecvBuffSize int `json:"recvBuffSize,omitempty"` + SendBuffSize int `json:"sendBuffSize,omitempty"` + DigestWaitTime common.Duration `json:"digestWaitTime,omitempty"` + RequestWaitTime common.Duration `json:"requestWaitTime,omitempty"` + ResponseWaitTime common.Duration `json:"responseWaitTime,omitempty"` + AliveTimeInterval common.Duration `json:"aliveTimeInterval,omitempty"` + AliveExpirationTimeout common.Duration `json:"aliveExpirationTimeout,omitempty"` + ReconnectInterval common.Duration `json:"reconnectInterval,omitempty"` + ExternalEndpoint string `json:"externalEndpoint,omitempty"` + Election v1.Election `json:"election,omitempty"` + PvtData PVTData `json:"pvtData,omitempty"` + State v1.State `json:"state,omitempty"` + MaxConnectionAttempts int `json:"maxConnectionAttempts,omitempty"` + MsgExpirationFactor int `json:"msgExpirationFactor,omitempty"` +} + +type PVTData struct { + PullRetryThreshold common.Duration `json:"pullRetryThreshold,omitempty"` + TransientstoreMaxBlockRetention int `json:"transientstoreMaxBlockRetention,omitempty"` + PushAckTimeout common.Duration `json:"pushAckTimeout,omitempty"` + BtlPullMargin int `json:"btlPullMargin,omitempty"` + ReconcileBatchSize int `json:"reconcileBatchSize,omitempty"` + ReconcileSleepInterval common.Duration `json:"reconcileSleepInterval,omitempty"` + ReconciliationEnabled *bool `json:"reconciliationEnabled,omitempty"` + SkipPullingInvalidTransactionsDuringCommit *bool `json:"skipPullingInvalidTransactionsDuringCommit,omitempty"` + ImplicitCollectionDisseminationPolicy ImplicitCollectionDisseminationPolicy `json:"implicitCollectionDisseminationPolicy,omitempty"` +} + +type AddressOverride struct { + From string `json:"from"` + To string `json:"to"` + CACertsFile string `json:"caCertsFile"` + certBytes []byte +} + +type Limits struct { + Concurrency Concurrency `json:"concurrency,omitempty"` +} + +type Concurrency struct { + EndorserService int `json:"endorserService,omitempty"` + DeliverService int `json:"deliverService,omitempty"` + GatewayService int `json:"gatewayService,omitempty"` +} + +type ImplicitCollectionDisseminationPolicy struct { + RequiredPeerCount int `json:"requiredPeerCount,omitempty"` + MaxPeerCount int `json:"maxPeerCount,omitempty"` +} + +type Chaincode struct { + ID v1.ID `json:"id,omitempty"` + Builder string `json:"builder,omitempty"` + Pull *bool `json:"pull,omitempty"` + Golang v1.Golang `json:"golang,omitempty"` + Java v1.Java `json:"java,omitempty"` + Node v1.Node `json:"node,omitempty"` + StartupTimeout common.Duration `json:"startuptimeout,omitempty"` + ExecuteTimeout common.Duration `json:"executetimeout,omitempty"` + Mode string `json:"mode,omitempty"` + KeepAlive common.Duration `json:"keepalive,omitempty"` + System map[string]string `json:"system,omitempty"` + Logging v1.Logging `json:"logging,omitempty"` + ExternalBuilders []ExternalBuilder `json:"externalBuilders,omitempty"` + InstallTimeout common.Duration `json:"installTimeout,omitempty"` +} + +type ExternalBuilder struct { + Path string `json:"path,omitempty"` + Name string `json:"name,omitempty"` + EnvironmentWhiteList []string `json:"environmentWhiteList,omitempty"` + PropogateEnvironment []string `json:"propagateEnvironment,omitempty"` +} + +type Ledger struct { + State LedgerState `json:"state,omitempty"` + 
History v1.LedgerHistory `json:"history,omitempty"` + PvtDataStore PvtDataStore `json:"pvtdataStore,omitempty"` +} + +type LedgerState struct { + StateDatabase string `json:"stateDatabase,omitempty"` + TotalQueryLimit int `json:"totalQueryLimit,omitempty"` + CouchdbConfig CouchdbConfig `json:"couchDBConfig,omitempty"` + SnapShots SnapShots `json:"SnapShots,omitempty"` +} + +type CouchdbConfig struct { + CouchDBAddress string `json:"couchDBAddress,omitempty"` + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + MaxRetries int `json:"maxRetries,omitempty"` + MaxRetriesOnStartup int `json:"maxRetriesOnStartup,omitempty"` + RequestTimeout common.Duration `json:"requestTimeout,omitempty"` + QueryLimit int `json:"internalQueryLimit,omitempty"` + MaxBatchUpdateSize int `json:"maxBatchUpdateSize,omitempty"` + WarmIndexesAfterNBlocks int `json:"warmIndexesAfterNBlocks,omitempty"` + CreateGlobalChangesDB *bool `json:"createGlobalChangesDB,omitempty"` + CacheSize int `json:"cacheSize,omitempty"` +} + +type SnapShots struct { + RootDir string `json:"rootDir,omitempty"` +} + +type PvtDataStore struct { + CollElgProcMaxDbBatchSize int `json:"collElgProcMaxDbBatchSize,omitempty"` + CollElgProcDbBatchesInterval int `json:"collElgProcDbBatchesInterval,omitempty"` + DeprioritizedDataReconcilerInterval common.Duration `json:"deprioritizedDataReconcilerInterval,omitempty"` +} + +type Gateway struct { + Enabled *bool `json:"enabled,omitempty"` + EndorsementTimeout common.Duration `json:"endorsementTimeout,omitempty"` + DialTimeout common.Duration `json:"dialTimeout,omitempty"` +} + +type KeepAlive struct { + Interval common.Duration `json:"interval,omitempty"` + Timeout common.Duration `json:"timeout,omitempty"` + MinInterval common.Duration `json:"minInterval,omitempty"` + Client v1.KeepAliveClient `json:"client,omitempty"` + DeliveryClient v1.KeepAliveClient `json:"deliveryClient,omitempty"` +} + +func (a *AddressOverride) CACertsFileToBytes() ([]byte, error) { + data, err := util.Base64ToBytes(a.CACertsFile) + if err != nil { + return nil, err + } + + return data, nil +} + +func (a *AddressOverride) GetCertBytes() []byte { + return a.certBytes +} diff --git a/pkg/certificate/certificate.go b/pkg/certificate/certificate.go new file mode 100644 index 00000000..34c0bc98 --- /dev/null +++ b/pkg/certificate/certificate.go @@ -0,0 +1,437 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package certificate + +import ( + "context" + "fmt" + "path/filepath" + "time" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + commonapi "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/certificate/reenroller" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var log = logf.Log.WithName("certificate_manager") + +//go:generate counterfeiter -o mocks/reenroller.go -fake-name Reenroller . Reenroller +type Reenroller interface { + Reenroll() (*config.Response, error) +} + +type CertificateManager struct { + Client k8sclient.Client + Scheme *runtime.Scheme +} + +func New(client k8sclient.Client, scheme *runtime.Scheme) *CertificateManager { + return &CertificateManager{ + Client: client, + Scheme: scheme, + } +} + +func (c *CertificateManager) GetExpireDate(pemBytes []byte) (time.Time, error) { + cert, err := util.GetCertificateFromPEMBytes(pemBytes) + if err != nil { + return time.Time{}, errors.New("failed to get certificate from bytes") + } + + return cert.NotAfter, nil +} + +func (c *CertificateManager) GetDurationToNextRenewal(certType common.SecretType, instance v1.Object, numSecondsBeforeExpire int64) (time.Duration, error) { + certName := fmt.Sprintf("%s-%s-signcert", certType, instance.GetName()) + cert, err := c.GetSignCert(certName, instance.GetNamespace()) + if err != nil { + return time.Duration(0), err + } + + return c.GetDurationToNextRenewalForCert(certName, cert, instance, numSecondsBeforeExpire) +} + +func (c *CertificateManager) GetDurationToNextRenewalForCert(certName string, cert []byte, instance v1.Object, numSecondsBeforeExpire int64) (time.Duration, error) { + expireDate, err := c.GetExpireDate(cert) + if err != nil { + return time.Duration(0), err + } + + if expireDate.IsZero() { + return time.Duration(0), errors.New("failed to get non-zero expiration date from certificate") + } + if expireDate.Before(time.Now()) { + return time.Duration(0), fmt.Errorf("%s has expired", certName) + } + + renewDate := expireDate.Add(-time.Duration(numSecondsBeforeExpire) * time.Second) // Subtract num seconds from expire date + duration := renewDate.Sub(time.Now()) // Get duration between now and the renew date (negative duration means renew date < time.Now()) + if duration < 0 { + return time.Duration(0), nil + } + return duration, nil +} + +func (c *CertificateManager) CertificateExpiring(certType common.SecretType, instance v1.Object, numSecondsBeforeExpire int64) (expiring bool, expireDate time.Time, err error) { + certName := fmt.Sprintf("%s-%s-signcert", certType, instance.GetName()) + cert, err := c.GetSignCert(certName, instance.GetNamespace()) + if err != nil { + return false, time.Time{}, err + } + + return c.Expires(cert, numSecondsBeforeExpire) +} + +func (c *CertificateManager) Expires(cert []byte, numSecondsBeforeExpire int64) (expiring bool, expireDate time.Time, err error) { + expireDate, err = c.GetExpireDate(cert) + if err != nil { + return false, time.Time{}, err + } + + // Checks if the 
duration between time.Now() and the expiration date is less than or equal to the numSecondsBeforeExpire + if expireDate.Sub(time.Now()) <= time.Duration(numSecondsBeforeExpire)*time.Second { + return true, expireDate, nil + } + + return false, time.Time{}, nil +} + +func (c *CertificateManager) CheckCertificatesForExpire(instance v1.Object, numSecondsBeforeExpire int64) (statusType current.IBPCRStatusType, message string, err error) { + tlsExpiring, tlsExpireDate, err := c.CertificateExpiring(common.TLS, instance, numSecondsBeforeExpire) + if err != nil { + err = errors.Wrap(err, "failed to get tls signcert expiry info") + return + } + + ecertExpiring, ecertExpireDate, err := c.CertificateExpiring(common.ECERT, instance, numSecondsBeforeExpire) + if err != nil { + err = errors.Wrap(err, "failed to get ecert signcert expiry info") + return + } + + // If not certificate are expring, no further action is required + if !tlsExpiring && !ecertExpiring { + return current.Deployed, "", nil + } + + statusType = current.Warning + + if tlsExpiring { + // Check if tls cert's expiration date has already passed + if tlsExpireDate.Before(time.Now()) { + statusType = current.Error + message += fmt.Sprintf("tls-%s-signcert has expired", instance.GetName()) + } else { + message += fmt.Sprintf("tls-%s-signcert expires on %s", instance.GetName(), tlsExpireDate.String()) + } + } + + if message != "" { + message += ", " + } + + if ecertExpiring { + // Check if ecert's expiration date has already passed + if ecertExpireDate.Before(time.Now()) { + statusType = current.Error + message += fmt.Sprintf("ecert-%s-signcert has expired", instance.GetName()) + } else { + message += fmt.Sprintf("ecert-%s-signcert expires on %s", instance.GetName(), ecertExpireDate.String()) + } + } + + return statusType, message, nil +} + +func (c *CertificateManager) GetReenroller(certType common.SecretType, spec *current.EnrollmentSpec, bccsp *commonapi.BCCSP, storagePath string, certPemBytes, keyPemBytes []byte, hsmEnabled bool, newKey bool) (Reenroller, error) { + storagePath = filepath.Join(storagePath, "reenroller", string(certType)) + + var cfg *current.Enrollment + if certType == common.TLS { + cfg = spec.TLS + } else { + cfg = spec.Component + } + + certReenroller, err := reenroller.New(cfg, storagePath, bccsp, "", newKey) + if err != nil { + return nil, errors.Wrap(err, "failed to initialize reenroller") + } + + err = certReenroller.InitClient() + if err != nil { + return nil, errors.Wrap(err, "failed to initialize CA client for reenroller") + } + + err = certReenroller.LoadIdentity(certPemBytes, keyPemBytes, hsmEnabled) + if err != nil { + return nil, errors.Wrap(err, "failed to load Identity for reenroller") + } + + return certReenroller, nil +} + +func (c *CertificateManager) ReenrollCert(certType common.SecretType, reenroller Reenroller, instance v1.Object, hsmEnabled bool) error { + if reenroller == nil { + return errors.New("reenroller not passed") + } + + resp, err := reenroller.Reenroll() + if err != nil { + return errors.Wrapf(err, "failed to renew %s certificate for instance '%s'", certType, instance.GetName()) + } + + err = c.UpdateSignCert(fmt.Sprintf("%s-%s-signcert", certType, instance.GetName()), resp.SignCert, instance) + if err != nil { + return errors.Wrapf(err, "failed to update signcert secret for instance '%s'", instance.GetName()) + } + + if !hsmEnabled { + err = c.UpdateKey(fmt.Sprintf("%s-%s-keystore", certType, instance.GetName()), resp.Keystore, instance) + if err != nil { + return errors.Wrapf(err, 
"failed to update keystore secret for instance '%s'", instance.GetName()) + } + } + + return nil +} + +type Instance interface { + v1.Object + UsingHSMProxy() bool + IsHSMEnabled() bool + EnrollerImage() string + GetPullSecrets() []corev1.LocalObjectReference + GetResource(current.Component) corev1.ResourceRequirements + PVCName() string +} + +func (c *CertificateManager) RenewCert(certType common.SecretType, instance Instance, spec *current.EnrollmentSpec, bccsp *commonapi.BCCSP, storagePath string, hsmEnabled bool, newKey bool) error { + cert, key, err := c.GetSignCertAndKey(certType, instance, hsmEnabled) + if err != nil { + return err + } + + if certType == common.TLS && hsmEnabled && !instance.UsingHSMProxy() { + bccsp = nil + } + + var certReenroller Reenroller + if certType == common.ECERT && hsmEnabled && !instance.UsingHSMProxy() { + log.Info(fmt.Sprintf("Certificate manager renewing ecert, non-proxy HSM enabled")) + hsmConfig, err := config.ReadHSMConfig(c.Client, instance) + if err != nil { + return err + } + + if hsmConfig.Daemon != nil { + certReenroller, err = reenroller.NewHSMDaemonReenroller(spec.Component, storagePath, bccsp, "", hsmConfig, instance, c.Client, c.Scheme, newKey) + if err != nil { + return err + } + } else { + certReenroller, err = reenroller.NewHSMReenroller(spec.Component, storagePath, bccsp, "", hsmConfig, instance, c.Client, c.Scheme, newKey) + if err != nil { + return err + } + } + + err = c.ReenrollCert(certType, certReenroller, instance, hsmEnabled) + if err != nil { + return err + } + + return nil + } + + // For TLS certificate, always use software enroller. We don't support HSM for TLS certificates + if certType == common.TLS { + log.Info("Certificate manager renewing TLS") + bccsp = nil + + keySecretName := fmt.Sprintf("%s-%s-keystore", certType, instance.GetName()) + key, err = c.GetKey(keySecretName, instance.GetNamespace()) + if err != nil { + return err + } + + certReenroller, err = c.GetReenroller(certType, spec, bccsp, storagePath, cert, key, false, newKey) + if err != nil { + return err + } + + err = c.ReenrollCert(certType, certReenroller, instance, false) + if err != nil { + return err + } + + return nil + } + + log.Info(fmt.Sprintf("Certificate manager renewing %s", certType)) + certReenroller, err = c.GetReenroller(certType, spec, bccsp, storagePath, cert, key, hsmEnabled, newKey) + if err != nil { + return err + } + + err = c.ReenrollCert(certType, certReenroller, instance, hsmEnabled) + if err != nil { + return err + } + + return nil +} + +func (c *CertificateManager) UpdateSignCert(name string, cert []byte, instance v1.Object) error { + // Cert might not be returned from reenroll call, for example if the reenroll happens in a job which handles + // updating the secret when using HSM (non-proxy) + if cert == nil || len(cert) == 0 { + return nil + } + + data := map[string][]byte{ + "cert.pem": cert, + } + + err := c.UpdateSecret(instance, name, data) + if err != nil { + return err + } + + return nil +} + +func (c *CertificateManager) UpdateKey(name string, key []byte, instance v1.Object) error { + // Need to ensure the value passed in for key is valid before updating. 
+ // Otherwise, an empty key will end up in the secret overriding a valid key, which + // will cause runtime errors on nodes + if key == nil || len(key) == 0 { + return nil + } + + data := map[string][]byte{ + "key.pem": key, + } + + err := c.UpdateSecret(instance, name, data) + if err != nil { + return err + } + + return nil +} + +func (c *CertificateManager) UpdateSecret(instance v1.Object, name string, data map[string][]byte) error { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: instance.GetNamespace(), + Labels: instance.GetLabels(), + }, + Data: data, + Type: corev1.SecretTypeOpaque, + } + + err := c.Client.Update(context.TODO(), secret, k8sclient.UpdateOption{Owner: instance, Scheme: c.Scheme}) + if err != nil { + return err + } + + return nil +} + +func (c *CertificateManager) GetSignCertAndKey(certType common.SecretType, instance v1.Object, hsmEnabled bool) ([]byte, []byte, error) { + certSecretName := fmt.Sprintf("%s-%s-signcert", certType, instance.GetName()) + keySecretName := fmt.Sprintf("%s-%s-keystore", certType, instance.GetName()) + + cert, err := c.GetSignCert(certSecretName, instance.GetNamespace()) + if err != nil { + return nil, nil, err + } + + key := []byte{} + if !hsmEnabled { + key, err = c.GetKey(keySecretName, instance.GetNamespace()) + if err != nil { + return nil, nil, err + } + } + + return cert, key, nil +} + +func (c *CertificateManager) GetSignCert(name, namespace string) ([]byte, error) { + secret, err := c.GetSecret(name, namespace) + if err != nil { + return nil, err + } + + if secret.Data == nil || len(secret.Data) == 0 { + return nil, errors.New(fmt.Sprintf("%s secret is blank", name)) + } + + if secret.Data["cert.pem"] != nil { + return secret.Data["cert.pem"], nil + } + + return nil, errors.New(fmt.Sprintf("cannot get %s", name)) +} + +func (c *CertificateManager) GetKey(name, namespace string) ([]byte, error) { + secret, err := c.GetSecret(name, namespace) + if err != nil { + return nil, err + } + + if secret.Data == nil || len(secret.Data) == 0 { + return nil, errors.New(fmt.Sprintf("%s secret is blank", name)) + } + + if secret.Data["key.pem"] != nil { + return secret.Data["key.pem"], nil + } + + return nil, errors.New(fmt.Sprintf("cannot get %s", name)) +} + +func (c *CertificateManager) GetSecret(name, namespace string) (*corev1.Secret, error) { + namespacedName := types.NamespacedName{ + Name: name, + Namespace: namespace, + } + + secret := &corev1.Secret{} + err := c.Client.Get(context.TODO(), namespacedName, secret) + if err != nil { + return nil, err + } + + return secret, nil +} diff --git a/pkg/certificate/certificate_suite_test.go b/pkg/certificate/certificate_suite_test.go new file mode 100644 index 00000000..5840f12e --- /dev/null +++ b/pkg/certificate/certificate_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package certificate_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestCertificate(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Certificate Suite") +} diff --git a/pkg/certificate/certificate_test.go b/pkg/certificate/certificate_test.go new file mode 100644 index 00000000..1a656c70 --- /dev/null +++ b/pkg/certificate/certificate_test.go @@ -0,0 +1,362 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package certificate_test + +import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "encoding/pem" + "errors" + "math/big" + "time" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + controllermocks "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + "github.com/IBM-Blockchain/fabric-operator/pkg/certificate" + "github.com/IBM-Blockchain/fabric-operator/pkg/certificate/mocks" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = Describe("Certificate", func() { + var ( + certificateManager *certificate.CertificateManager + mockClient *controllermocks.Client + mockEnroller *mocks.Reenroller + instance v1.Object + + certBytes []byte + ) + + BeforeEach(func() { + mockClient = &controllermocks.Client{} + mockEnroller = &mocks.Reenroller{} + + certificateManager = certificate.New(mockClient, &runtime.Scheme{}) + + instance = ¤t.IBPPeer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "peer-1", + Namespace: "peer-namespace", + Labels: map[string]string{}, + }, + } + + certBytes = createCert(time.Now().Add(time.Hour * 24 * 30)) // expires in 30 days + + reenrollResponse := &config.Response{ + SignCert: []byte("cert"), + Keystore: []byte("key"), + } + + mockEnroller.ReenrollReturns(reenrollResponse, nil) + mockClient.UpdateReturns(nil) + + mockClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj client.Object) error { + o := obj.(*corev1.Secret) + switch types.Name { + case "tls-" + instance.GetName() + "-signcert": + o.Name = "tls-" + instance.GetName() + "-signcert" + o.Namespace = instance.GetNamespace() + o.Data = map[string][]byte{"cert.pem": certBytes} + case "tls-" + instance.GetName() + "-keystore": + o.Name = "tls-" + instance.GetName() + "-keystore" + o.Namespace = instance.GetNamespace() + o.Data = map[string][]byte{"key.pem": []byte("key")} + case "ecert-" + instance.GetName() + "-signcert": + o.Name = "ecert-" + instance.GetName() + "-signcert" + o.Namespace = instance.GetNamespace() + o.Data = 
map[string][]byte{"cert.pem": certBytes} + case "ecert-" + instance.GetName() + "-keystore": + o.Name = "ecert-" + instance.GetName() + "-keystore" + o.Namespace = instance.GetNamespace() + o.Data = map[string][]byte{"key.pem": []byte("key")} + } + return nil + } + }) + + Context("get expire date", func() { + It("returns error if fails to read certificate", func() { + certbytes := []byte("invalid") + _, err := certificateManager.GetExpireDate(certbytes) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to get certificate from bytes")) + }) + + It("returns expire date of certificate", func() { + expectedtime := time.Now().Add(time.Hour * 24 * 30).UTC() + expireDate, err := certificateManager.GetExpireDate(certBytes) + Expect(err).NotTo(HaveOccurred()) + Expect(expireDate.Month()).To(Equal(expectedtime.Month())) + Expect(expireDate.Day()).To(Equal(expectedtime.Day())) + Expect(expireDate.Year()).To(Equal(expectedtime.Year())) + }) + }) + + Context("get duration to next renewal", func() { + It("returns error if fails to get expire date", func() { + mockClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj client.Object) error { + o := obj.(*corev1.Secret) + o.Name = "tls-" + instance.GetName() + "-signcert" + o.Namespace = instance.GetNamespace() + o.Data = map[string][]byte{"cert.pem": []byte("invalid")} + return nil + } + thirtyDaysToSeconds := int64(30 * 24 * 60 * 60) + _, err := certificateManager.GetDurationToNextRenewal(common.TLS, instance, thirtyDaysToSeconds) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to get certificate from bytes")) + }) + + It("gets duration until next renewal 10 days before expire", func() { + tenDaysToSeconds := int64(10 * 24 * 60 * 60) + duration, err := certificateManager.GetDurationToNextRenewal(common.TLS, instance, tenDaysToSeconds) + Expect(err).NotTo(HaveOccurred()) + Expect(duration.Round(time.Hour)).To(Equal(time.Hour * 24 * 20)) // 10 days before cert that expires in 30 days = 20 days until next renewal + }) + + It("gets duration until next renewal 31 days before expire", func() { + thiryOneDaysToSeconds := int64(31 * 24 * 60 * 60) + duration, err := certificateManager.GetDurationToNextRenewal(common.TLS, instance, thiryOneDaysToSeconds) + Expect(err).NotTo(HaveOccurred()) + Expect(duration.Round(time.Hour)).To(Equal(time.Duration(0))) // 31 days before cert that expires in 30 days = -1 days until next renewal, so should return 0 + }) + }) + + Context("certificate expiring", func() { + It("returns false if not expiring", func() { + tenDaysToSeconds := int64(10 * 24 * 60 * 60) + expiring, _, err := certificateManager.CertificateExpiring(common.TLS, instance, tenDaysToSeconds) + Expect(err).NotTo(HaveOccurred()) + Expect(expiring).To(Equal(false)) + }) + + It("returns true if expiring", func() { + thirtyDaysToSeconds := int64(30 * 24 * 60 * 60) + expiring, _, err := certificateManager.CertificateExpiring(common.TLS, instance, thirtyDaysToSeconds) + Expect(err).NotTo(HaveOccurred()) + Expect(expiring).To(Equal(true)) + }) + }) + + Context("check certificates for expire", func() { + var ( + expiredCert []byte + ) + BeforeEach(func() { + expiredCert = createCert(time.Now().Add(-30 * time.Second)) // expired 30 seconds ago + }) + + It("returns error if fails to get tls signcert expiry info", func() { + mockClient.GetReturns(errors.New("fake error")) + _, _, err := certificateManager.CheckCertificatesForExpire(instance, 0) + Expect(err).To(HaveOccurred()) + 
Expect(err.Error()).To(ContainSubstring("failed to get tls signcert expiry info")) + }) + + It("returns deployed status if neither tls nor ecert signcerts are expiring", func() { + tenDaysToSeconds := int64(10 * 24 * 60 * 60) + status, message, err := certificateManager.CheckCertificatesForExpire(instance, tenDaysToSeconds) + Expect(err).NotTo(HaveOccurred()) + Expect(status).To(Equal(current.Deployed)) + Expect(message).To(Equal("")) + }) + + It("returns warning status if either tls or ecert signcert is expiring", func() { + thirtyDaysToSeconds := int64(30 * 24 * 60 * 60) + status, message, err := certificateManager.CheckCertificatesForExpire(instance, thirtyDaysToSeconds) + Expect(err).NotTo(HaveOccurred()) + Expect(status).To(Equal(current.Warning)) + Expect(message).To(ContainSubstring("tls-peer-1-signcert expires on")) + Expect(message).To(ContainSubstring("ecert-peer-1-signcert expires on")) + }) + + It("returns error status if either tls or ecert signcert has expired", func() { + mockClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj client.Object) error { + o := obj.(*corev1.Secret) + switch types.Name { + case "tls-" + instance.GetName() + "-signcert": + o.Name = "tls-" + instance.GetName() + "-signcert" + o.Namespace = instance.GetNamespace() + o.Data = map[string][]byte{"cert.pem": expiredCert} + case "ecert-" + instance.GetName() + "-signcert": + o.Name = "ecert-" + instance.GetName() + "-signcert" + o.Namespace = instance.GetNamespace() + o.Data = map[string][]byte{"cert.pem": certBytes} + } + return nil + } + thirtyDaysToSeconds := int64(30 * 24 * 60 * 60) + status, message, err := certificateManager.CheckCertificatesForExpire(instance, thirtyDaysToSeconds) + Expect(err).NotTo(HaveOccurred()) + Expect(status).To(Equal(current.Error)) + Expect(message).To(ContainSubstring("tls-peer-1-signcert has expired")) + Expect(message).To(ContainSubstring("ecert-peer-1-signcert expires on")) + }) + }) + + Context("reenroll cert", func() { + When("not using HSM", func() { + It("returns error if enroller not passed", func() { + err := certificateManager.ReenrollCert("tls", nil, instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("reenroller not passed")) + }) + + It("returns error if reenroll returns error", func() { + mockEnroller.ReenrollReturns(nil, errors.New("fake error")) + err := certificateManager.ReenrollCert("tls", mockEnroller, instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to renew tls certificate for instance 'peer-1': fake error")) + }) + + It("returns error if failed to update signcert secret", func() { + mockClient.UpdateReturns(errors.New("fake error")) + err := certificateManager.ReenrollCert("tls", mockEnroller, instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to update signcert secret for instance 'peer-1': fake error")) + }) + + It("returns error if failed to update keystore secret", func() { + mockClient.UpdateReturnsOnCall(1, errors.New("fake error")) + err := certificateManager.ReenrollCert("tls", mockEnroller, instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to update keystore secret for instance 'peer-1': fake error")) + }) + + It("renews certificate", func() { + err := certificateManager.ReenrollCert("tls", mockEnroller, instance, false) + Expect(err).NotTo(HaveOccurred()) + + By("updating cert and key secret", func() { + Expect(mockClient.UpdateCallCount()).To(Equal(2)) + }) + }) + }) + + 
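		// A minimal usage sketch (not part of this patch) of how the behaviour verified
		// above is typically driven: a caller checks CertificateExpiring and, if the cert
		// is inside the renewal window, calls ReenrollCert. The helper name and the
		// 30-day window below are illustrative assumptions.
		//
		//	func renewTLSIfExpiring(cm *certificate.CertificateManager, enroller certificate.Reenroller, instance v1.Object, hsmEnabled bool) error {
		//		window := int64(30 * 24 * 60 * 60) // renew when within 30 days of expiry
		//		expiring, _, err := cm.CertificateExpiring(common.TLS, instance, window)
		//		if err != nil {
		//			return err
		//		}
		//		if !expiring {
		//			return nil
		//		}
		//		// Updates the tls signcert secret and, for software keys, the keystore secret.
		//		return cm.ReenrollCert("tls", enroller, instance, hsmEnabled)
		//	}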
When("using HSM", func() { + It("only updates cert secret", func() { + err := certificateManager.ReenrollCert("tls", mockEnroller, instance, true) + Expect(err).NotTo(HaveOccurred()) + Expect(mockClient.UpdateCallCount()).To(Equal(1)) + }) + }) + }) + + Context("update signcert", func() { + It("returns error if client fails to update secret", func() { + mockClient.UpdateReturns(errors.New("fake error")) + err := certificateManager.UpdateSignCert("secret-name", []byte("cert"), instance) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("fake error")) + }) + + It("updates signcert secret", func() { + err := certificateManager.UpdateSignCert("secret-name", []byte("cert"), instance) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("update key", func() { + It("returns error if client fails to update secret", func() { + mockClient.UpdateReturns(errors.New("fake error")) + err := certificateManager.UpdateKey("secret-name", []byte("cert"), instance) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("fake error")) + }) + + It("updates keystore secret", func() { + err := certificateManager.UpdateKey("secret-name", []byte("cert"), instance) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("update secret", func() { + It("returns error if client call for update fails", func() { + mockClient.UpdateReturns(errors.New("fake error")) + err := certificateManager.UpdateSecret(instance, "secret-name", map[string][]byte{}) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("fake error")) + }) + + It("updates secret", func() { + err := certificateManager.UpdateSecret(instance, "secret-name", map[string][]byte{}) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("get signcert and key", func() { + When("not using HSM", func() { + It("returns an error if fails to get secret", func() { + mockClient.GetReturns(errors.New("fake error")) + _, _, err := certificateManager.GetSignCertAndKey("tls", instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("fake error")) + }) + + It("gets signcert and key", func() { + cert, key, err := certificateManager.GetSignCertAndKey("tls", instance, false) + Expect(err).NotTo(HaveOccurred()) + Expect(cert).NotTo(BeNil()) + Expect(key).NotTo(BeNil()) + }) + }) + + When("using HSM", func() { + It("gets signcert and empty key", func() { + cert, key, err := certificateManager.GetSignCertAndKey("tls", instance, true) + Expect(err).NotTo(HaveOccurred()) + Expect(cert).NotTo(BeNil()) + Expect(len(key)).To(Equal(0)) + }) + }) + }) +}) + +func createCert(expireDate time.Time) []byte { + certtemplate := x509.Certificate{ + SerialNumber: big.NewInt(1), + NotAfter: expireDate, + } + + priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + Expect(err).NotTo(HaveOccurred()) + + cert, err := x509.CreateCertificate(rand.Reader, &certtemplate, &certtemplate, &priv.PublicKey, priv) + Expect(err).NotTo(HaveOccurred()) + + block := &pem.Block{ + Type: "CERTIFICATE", + Bytes: cert, + } + + return pem.EncodeToMemory(block) +} diff --git a/pkg/certificate/mocks/reenroller.go b/pkg/certificate/mocks/reenroller.go new file mode 100644 index 00000000..2c4c66c2 --- /dev/null +++ b/pkg/certificate/mocks/reenroller.go @@ -0,0 +1,108 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package mocks + +import ( + "sync" + + "github.com/IBM-Blockchain/fabric-operator/pkg/certificate" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" +) + +type Reenroller struct { + ReenrollStub func() (*config.Response, error) + reenrollMutex sync.RWMutex + reenrollArgsForCall []struct { + } + reenrollReturns struct { + result1 *config.Response + result2 error + } + reenrollReturnsOnCall map[int]struct { + result1 *config.Response + result2 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *Reenroller) Reenroll() (*config.Response, error) { + fake.reenrollMutex.Lock() + ret, specificReturn := fake.reenrollReturnsOnCall[len(fake.reenrollArgsForCall)] + fake.reenrollArgsForCall = append(fake.reenrollArgsForCall, struct { + }{}) + stub := fake.ReenrollStub + fakeReturns := fake.reenrollReturns + fake.recordInvocation("Reenroll", []interface{}{}) + fake.reenrollMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *Reenroller) ReenrollCallCount() int { + fake.reenrollMutex.RLock() + defer fake.reenrollMutex.RUnlock() + return len(fake.reenrollArgsForCall) +} + +func (fake *Reenroller) ReenrollCalls(stub func() (*config.Response, error)) { + fake.reenrollMutex.Lock() + defer fake.reenrollMutex.Unlock() + fake.ReenrollStub = stub +} + +func (fake *Reenroller) ReenrollReturns(result1 *config.Response, result2 error) { + fake.reenrollMutex.Lock() + defer fake.reenrollMutex.Unlock() + fake.ReenrollStub = nil + fake.reenrollReturns = struct { + result1 *config.Response + result2 error + }{result1, result2} +} + +func (fake *Reenroller) ReenrollReturnsOnCall(i int, result1 *config.Response, result2 error) { + fake.reenrollMutex.Lock() + defer fake.reenrollMutex.Unlock() + fake.ReenrollStub = nil + if fake.reenrollReturnsOnCall == nil { + fake.reenrollReturnsOnCall = make(map[int]struct { + result1 *config.Response + result2 error + }) + } + fake.reenrollReturnsOnCall[i] = struct { + result1 *config.Response + result2 error + }{result1, result2} +} + +func (fake *Reenroller) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.reenrollMutex.RLock() + defer fake.reenrollMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *Reenroller) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ certificate.Reenroller = new(Reenroller) diff --git a/pkg/certificate/reenroller/client.go b/pkg/certificate/reenroller/client.go new file mode 100644 index 00000000..ff7f28c0 --- /dev/null +++ b/pkg/certificate/reenroller/client.go @@ -0,0 +1,31 @@ +//go:build !pkcs11 +// +build !pkcs11 + +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package reenroller + +import ( + commonapi "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + "github.com/hyperledger/fabric-ca/lib" +) + +func GetClient(client *lib.Client, bccsp *commonapi.BCCSP) *lib.Client { + return client +} diff --git a/pkg/certificate/reenroller/client_pkcs11.go b/pkg/certificate/reenroller/client_pkcs11.go new file mode 100644 index 00000000..f35b1a14 --- /dev/null +++ b/pkg/certificate/reenroller/client_pkcs11.go @@ -0,0 +1,57 @@ +//go:build pkcs11 +// +build pkcs11 + +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package reenroller + +import ( + commonapi "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + "github.com/hyperledger/fabric-ca/lib" + "github.com/hyperledger/fabric/bccsp/factory" + "github.com/hyperledger/fabric/bccsp/pkcs11" +) + +func GetClient(client *lib.Client, bccsp *commonapi.BCCSP) *lib.Client { + if bccsp != nil { + if bccsp.PKCS11 != nil { + client.Config.CSP = &factory.FactoryOpts{ + ProviderName: bccsp.ProviderName, + Pkcs11Opts: &pkcs11.PKCS11Opts{ + SecLevel: bccsp.PKCS11.SecLevel, + HashFamily: bccsp.PKCS11.HashFamily, + Ephemeral: bccsp.PKCS11.Ephemeral, + Library: bccsp.PKCS11.Library, + Label: bccsp.PKCS11.Label, + Pin: bccsp.PKCS11.Pin, + SoftVerify: bccsp.PKCS11.SoftVerify, + Immutable: bccsp.PKCS11.Immutable, + }, + } + + if bccsp.PKCS11.FileKeyStore != nil { + client.Config.CSP.Pkcs11Opts.FileKeystore = &pkcs11.FileKeystoreOpts{ + KeyStorePath: bccsp.PKCS11.FileKeyStore.KeyStorePath, + } + } + } + } + + return client +} diff --git a/pkg/certificate/reenroller/hsmdaemonreenroller.go b/pkg/certificate/reenroller/hsmdaemonreenroller.go new file mode 100644 index 00000000..8718ce74 --- /dev/null +++ b/pkg/certificate/reenroller/hsmdaemonreenroller.go @@ -0,0 +1,422 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package reenroller + +import ( + "context" + "fmt" + "path/filepath" + "time" + + "github.com/hyperledger/fabric-ca/lib" + "github.com/hyperledger/fabric-ca/lib/tls" + "github.com/pkg/errors" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + commonapi "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + jobv1 "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/job" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" +) + +type HSMDaemonReenroller struct { + CAClient *lib.Client + Identity Identity + + HomeDir string + Config *current.Enrollment + BCCSP bool + Timeout time.Duration + HSMConfig *config.HSMConfig + Instance Instance + Client k8sclient.Client + Scheme *runtime.Scheme + NewKey bool +} + +func NewHSMDaemonReenroller(cfg *current.Enrollment, homeDir string, bccsp *commonapi.BCCSP, timeoutstring string, hsmConfig *config.HSMConfig, instance Instance, client k8sclient.Client, scheme *runtime.Scheme, newKey bool) (*HSMDaemonReenroller, error) { + if cfg == nil { + return nil, errors.New("unable to reenroll, Enrollment config must be passed") + } + + err := EnrollmentConfigValidation(cfg) + if err != nil { + return nil, err + } + + caclient := &lib.Client{ + HomeDir: homeDir, + Config: &lib.ClientConfig{ + TLS: tls.ClientTLSConfig{ + Enabled: true, + CertFiles: []string{"tlsCert.pem"}, + }, + URL: fmt.Sprintf("https://%s:%s", cfg.CAHost, cfg.CAPort), + }, + } + + bccsp.PKCS11.Library = filepath.Join("/hsm/lib", filepath.Base(hsmConfig.Library.FilePath)) + + caclient = GetClient(caclient, bccsp) + + timeout, err := time.ParseDuration(timeoutstring) + if err != nil || timeoutstring == "" { + timeout = time.Duration(60 * time.Second) + } + + r := &HSMDaemonReenroller{ + CAClient: caclient, + HomeDir: homeDir, + Config: cfg, + Timeout: timeout, + HSMConfig: hsmConfig, + Instance: instance, + Client: client, + Scheme: scheme, + NewKey: newKey, + } + + if bccsp != nil { + r.BCCSP = true + } + + return r, nil +} + +func (r *HSMDaemonReenroller) IsCAReachable() bool { + log.Info("Check if CA is reachable before triggering enroll job") + + timeout := r.Timeout + url := fmt.Sprintf("https://%s:%s/cainfo", r.Config.CAHost, r.Config.CAPort) + + // Convert TLS certificate from base64 to file + tlsCertBytes, err := util.Base64ToBytes(r.Config.CATLS.CACert) + if err != nil { + log.Error(err, "Cannot convert TLS Certificate from base64") + return false + } + + err = wait.Poll(500*time.Millisecond, timeout, func() (bool, error) { + err = util.HealthCheck(url, tlsCertBytes, timeout) + if err == nil { + return true, nil + } + return false, nil + }) + if err != nil { + log.Error(err, "Health check failed") + return false + } + + return true +} + +func (r *HSMDaemonReenroller) Reenroll() (*config.Response, error) { + if !r.IsCAReachable() { + return nil, errors.New("unable to enroll, CA is not reachable") + } + + // Deleting CA client config is an unfortunate requirement since the ca client + // config map was not properly deleted after a successfull reenrollment request. 
+ // This is problematic when recreating a resource with same name, as it will + // try to use old settings in the config map, which might no longer apply, thus + // it must be removed if found before proceeding. + if err := deleteCAClientConfig(r.Client, r.Instance); err != nil { + return nil, err + } + + if err := createRootTLSSecret(r.Client, r.Instance, r.Scheme, r.Config.CATLS.CACert); err != nil { + return nil, err + } + + if err := createCAClientConfig(r.Client, r.Instance, r.Scheme, r.CAClient.Config); err != nil { + return nil, err + } + + job := r.initHSMJob(r.Instance, r.HSMConfig, r.Timeout) + if err := r.Client.Create(context.TODO(), job.Job, k8sclient.CreateOption{ + Owner: r.Instance, + Scheme: r.Scheme, + }); err != nil { + return nil, errors.Wrap(err, "failed to create HSM ca initialization job") + } + log.Info(fmt.Sprintf("Job '%s' created", job.GetName())) + + if err := job.WaitUntilActive(r.Client); err != nil { + return nil, err + } + log.Info(fmt.Sprintf("Job '%s' active", job.GetName())) + + if err := job.WaitUntilContainerFinished(r.Client, CertGen); err != nil { + return nil, err + } + log.Info(fmt.Sprintf("Job '%s' finished", job.GetName())) + + status, err := job.ContainerStatus(r.Client, CertGen) + if err != nil { + return nil, err + } + + switch status { + case jobv1.FAILED: + return nil, fmt.Errorf("Job '%s' finished unsuccessfully, not cleaning up pods to allow for error evaluation", job.GetName()) + case jobv1.COMPLETED: + if err := job.Delete(r.Client); err != nil { + return nil, err + } + + if err := deleteRootTLSSecret(r.Client, r.Instance); err != nil { + return nil, err + } + + if err := deleteCAClientConfig(r.Client, r.Instance); err != nil { + return nil, err + } + } + + if err := r.setControllerReferences(); err != nil { + return nil, err + } + + return &config.Response{}, nil +} + +func (r *HSMDaemonReenroller) setControllerReferences() error { + if err := setControllerReferenceFor(r.Client, r.Instance, r.Scheme, fmt.Sprintf("ecert-%s-signcert", r.Instance.GetName()), false); err != nil { + return err + } + + if err := setControllerReferenceFor(r.Client, r.Instance, r.Scheme, fmt.Sprintf("ecert-%s-cacerts", r.Instance.GetName()), false); err != nil { + return err + } + + if err := setControllerReferenceFor(r.Client, r.Instance, r.Scheme, fmt.Sprintf("ecert-%s-intercerts", r.Instance.GetName()), true); err != nil { + return err + } + + return nil +} + +const ( + // HSMClient is the name of container that contain the HSM client library + HSMClient = "hsm-client" + // CertGen is the name of container that runs the command to generate the certificate for the CA + CertGen = "certgen" +) + +func (r *HSMDaemonReenroller) initHSMJob(instance Instance, hsmConfig *config.HSMConfig, timeout time.Duration) *jobv1.Job { + hsmLibraryPath := hsmConfig.Library.FilePath + hsmLibraryName := filepath.Base(hsmLibraryPath) + + jobName := fmt.Sprintf("%s-reenroll", instance.GetName()) + + f := false + t := true + user := int64(0) + backoffLimit := int32(0) + mountPath := "/shared" + pvcVolumeName := fmt.Sprintf("%s-pvc-volume", instance.GetName()) + + k8sJob := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: jobName, + Namespace: instance.GetNamespace(), + Labels: map[string]string{ + "name": jobName, + "owner": instance.GetName(), + }, + }, + Spec: batchv1.JobSpec{ + BackoffLimit: &backoffLimit, + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + ServiceAccountName: instance.GetName(), + ImagePullSecrets: 
util.AppendImagePullSecretIfMissing(instance.GetPullSecrets(), hsmConfig.BuildPullSecret()), + RestartPolicy: corev1.RestartPolicyNever, + InitContainers: []corev1.Container{ + corev1.Container{ + Name: HSMClient, + Image: hsmConfig.Library.Image, + ImagePullPolicy: corev1.PullAlways, + Command: []string{ + "sh", + "-c", + fmt.Sprintf("mkdir -p %s/hsm && dst=\"%s/hsm/%s\" && echo \"Copying %s to ${dst}\" && mkdir -p $(dirname $dst) && cp -r %s $dst", mountPath, mountPath, hsmLibraryName, hsmLibraryPath, hsmLibraryPath), + }, + SecurityContext: &corev1.SecurityContext{ + RunAsUser: &user, + RunAsNonRoot: &f, + }, + VolumeMounts: []corev1.VolumeMount{ + corev1.VolumeMount{ + Name: "shared", + MountPath: mountPath, + }, + }, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("0.1"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + corev1.ResourceEphemeralStorage: resource.MustParse("100Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("500Mi"), + corev1.ResourceEphemeralStorage: resource.MustParse("1Gi"), + }, + }, + }, + }, + Containers: []corev1.Container{ + corev1.Container{ + Name: CertGen, + Image: instance.EnrollerImage(), + ImagePullPolicy: corev1.PullAlways, + SecurityContext: &corev1.SecurityContext{ + RunAsUser: &user, + Privileged: &t, + AllowPrivilegeEscalation: &t, + }, + Env: hsmConfig.GetEnvs(), + Command: []string{ + "sh", + "-c", + }, + Args: []string{ + fmt.Sprintf(config.DAEMON_CHECK_CMD+" && /usr/local/bin/enroller node reenroll %s %s %s %s %s %s %s %s %s %t", r.HomeDir, "/tmp/fabric-ca-client-config.yaml", r.Config.CAHost, r.Config.CAPort, r.Config.CAName, instance.GetName(), instance.GetNamespace(), r.Config.EnrollID, fmt.Sprintf("%s/cert.pem", r.HomeDir), r.NewKey), + }, + VolumeMounts: []corev1.VolumeMount{ + corev1.VolumeMount{ + Name: "tlscertfile", + MountPath: fmt.Sprintf("%s/tlsCert.pem", r.HomeDir), + SubPath: "tlsCert.pem", + }, + corev1.VolumeMount{ + Name: "certfile", + MountPath: fmt.Sprintf("%s/cert.pem", r.HomeDir), + SubPath: "cert.pem", + }, + corev1.VolumeMount{ + Name: "clientconfig", + MountPath: fmt.Sprintf("/tmp/%s", "fabric-ca-client-config.yaml"), + SubPath: "fabric-ca-client-config.yaml", + }, + corev1.VolumeMount{ + Name: "shared", + MountPath: "/hsm/lib", + SubPath: "hsm", + }, + { + Name: "shared", + MountPath: "/shared", + }, + }, + }, + }, + Volumes: []corev1.Volume{ + corev1.Volume{ + Name: "shared", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{ + Medium: corev1.StorageMediumMemory, + }, + }, + }, + corev1.Volume{ + Name: "tlscertfile", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: fmt.Sprintf("%s-init-roottls", instance.GetName()), + }, + }, + }, + corev1.Volume{ + Name: "certfile", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: fmt.Sprintf("ecert-%s-signcert", instance.GetName()), + }, + }, + }, + corev1.Volume{ + Name: "clientconfig", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: fmt.Sprintf("%s-init-config", instance.GetName()), + }, + }, + }, + }, + { + Name: pvcVolumeName, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: instance.PVCName(), + }, + }, + }, + }, + }, + }, + }, + } + + job := 
jobv1.New(k8sJob, &jobv1.Timeouts{ + WaitUntilActive: timeout, + WaitUntilFinished: timeout, + }) + + job.Spec.Template.Spec.Volumes = append(job.Spec.Template.Spec.Volumes, hsmConfig.GetVolumes()...) + job.Spec.Template.Spec.Containers[0].VolumeMounts = append(job.Spec.Template.Spec.Containers[0].VolumeMounts, hsmConfig.GetVolumeMounts()...) + + // If daemon settings are configured in HSM config, create a sidecar that is running the daemon image + if r.HSMConfig.Daemon != nil { + // Certain token information requires to be stored in persistent store, the administrator + // responsible for configuring HSM sets the HSM config to point to the path where the PVC + // needs to be mounted. + var pvcMount *corev1.VolumeMount + for _, vm := range r.HSMConfig.MountPaths { + if vm.UsePVC { + pvcMount = &corev1.VolumeMount{ + Name: pvcVolumeName, + MountPath: vm.MountPath, + } + } + } + + // Add daemon container to the deployment + config.AddDaemonContainer(r.HSMConfig, job, instance.GetResource(current.HSMDAEMON), pvcMount) + + // If a pvc mount has been configured in HSM config, set the volume mount on the CertGen container + if pvcMount != nil { + job.Spec.Template.Spec.Containers[0].VolumeMounts = append(job.Spec.Template.Spec.Containers[0].VolumeMounts, *pvcMount) + } + } + + return job +} diff --git a/pkg/certificate/reenroller/hsmreenroller.go b/pkg/certificate/reenroller/hsmreenroller.go new file mode 100644 index 00000000..3da83588 --- /dev/null +++ b/pkg/certificate/reenroller/hsmreenroller.go @@ -0,0 +1,492 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package reenroller + +import ( + "context" + "fmt" + "path/filepath" + "time" + + "github.com/hyperledger/fabric-ca/lib" + "github.com/hyperledger/fabric-ca/lib/tls" + "github.com/pkg/errors" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + commonapi "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + jobv1 "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/job" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" +) + +type Instance interface { + metav1.Object + EnrollerImage() string + GetPullSecrets() []corev1.LocalObjectReference + GetResource(current.Component) corev1.ResourceRequirements + PVCName() string +} + +type HSMReenroller struct { + CAClient *lib.Client + Identity Identity + + HomeDir string + Config *current.Enrollment + BCCSP bool + Timeout time.Duration + HSMConfig *config.HSMConfig + Instance Instance + Client k8sclient.Client + Scheme *runtime.Scheme + NewKey bool +} + +func NewHSMReenroller(cfg *current.Enrollment, homeDir string, bccsp *commonapi.BCCSP, timeoutstring string, hsmConfig *config.HSMConfig, instance Instance, client k8sclient.Client, scheme *runtime.Scheme, newKey bool) (*HSMReenroller, error) { + if cfg == nil { + return nil, errors.New("unable to reenroll, Enrollment config must be passed") + } + + err := EnrollmentConfigValidation(cfg) + if err != nil { + return nil, err + } + + caclient := &lib.Client{ + HomeDir: homeDir, + Config: &lib.ClientConfig{ + TLS: tls.ClientTLSConfig{ + Enabled: true, + CertFiles: []string{"tlsCert.pem"}, + }, + URL: fmt.Sprintf("https://%s:%s", cfg.CAHost, cfg.CAPort), + }, + } + + bccsp.PKCS11.Library = filepath.Join("/hsm/lib", filepath.Base(hsmConfig.Library.FilePath)) + + caclient = GetClient(caclient, bccsp) + + timeout, err := time.ParseDuration(timeoutstring) + if err != nil || timeoutstring == "" { + timeout = time.Duration(60 * time.Second) + } + + r := &HSMReenroller{ + CAClient: caclient, + HomeDir: homeDir, + Config: cfg, + Timeout: timeout, + HSMConfig: hsmConfig, + Instance: instance, + Client: client, + Scheme: scheme, + NewKey: newKey, + } + + if bccsp != nil { + r.BCCSP = true + } + + return r, nil +} + +func (r *HSMReenroller) IsCAReachable() bool { + log.Info("Check if CA is reachable before triggering enroll job") + + timeout := r.Timeout + url := fmt.Sprintf("https://%s:%s/cainfo", r.Config.CAHost, r.Config.CAPort) + + // Convert TLS certificate from base64 to file + tlsCertBytes, err := util.Base64ToBytes(r.Config.CATLS.CACert) + if err != nil { + log.Error(err, "Cannot convert TLS Certificate from base64") + return false + } + + err = wait.Poll(500*time.Millisecond, timeout, func() (bool, error) { + err = util.HealthCheck(url, tlsCertBytes, timeout) + if err == nil { + return true, nil + } + return false, nil + }) + if err != nil { + log.Error(err, "Health check failed") + return false + } + + return true +} + +func (r *HSMReenroller) Reenroll() (*config.Response, error) { + if !r.IsCAReachable() { + return nil, errors.New("unable to enroll, CA is not reachable") + } + 
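	// The flow below mirrors HSMDaemonReenroller.Reenroll: clear any stale CA client
	// config map, create the root TLS secret and CA client config, run the reenroll job,
	// and clean everything up on completion. The difference is that this variant waits on
	// the job as a whole (WaitUntilFinished/Status) rather than on the certgen container,
	// and its job spec runs without the HSM daemon sidecar or PVC mount.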
+ // Deleting CA client config is an unfortunate requirement since the ca client + // config map was not properly deleted after a successfull reenrollment request. + // This is problematic when recreating a resource with same name, as it will + // try to use old settings in the config map, which might no longer apply, thus + // it must be removed if found before proceeding. + if err := deleteCAClientConfig(r.Client, r.Instance); err != nil { + return nil, err + } + + if err := createRootTLSSecret(r.Client, r.Instance, r.Scheme, r.Config.CATLS.CACert); err != nil { + return nil, err + } + + if err := createCAClientConfig(r.Client, r.Instance, r.Scheme, r.CAClient.Config); err != nil { + return nil, err + } + + job := r.initHSMJob(r.Instance, r.HSMConfig, r.Timeout) + if err := r.Client.Create(context.TODO(), job.Job, k8sclient.CreateOption{ + Owner: r.Instance, + Scheme: r.Scheme, + }); err != nil { + return nil, errors.Wrap(err, "failed to create HSM ca initialization job") + } + log.Info(fmt.Sprintf("Job '%s' created", job.GetName())) + + if err := job.WaitUntilActive(r.Client); err != nil { + return nil, err + } + log.Info(fmt.Sprintf("Job '%s' active", job.GetName())) + + if err := job.WaitUntilFinished(r.Client); err != nil { + return nil, err + } + log.Info(fmt.Sprintf("Job '%s' finished", job.GetName())) + + status, err := job.Status(r.Client) + if err != nil { + return nil, err + } + + switch status { + case jobv1.FAILED: + return nil, fmt.Errorf("Job '%s' finished unsuccessfully, not cleaning up pods to allow for error evaluation", job.GetName()) + case jobv1.COMPLETED: + if err := job.Delete(r.Client); err != nil { + return nil, err + } + + if err := deleteRootTLSSecret(r.Client, r.Instance); err != nil { + return nil, err + } + + if err := deleteCAClientConfig(r.Client, r.Instance); err != nil { + return nil, err + } + } + + if err := r.setControllerReferences(); err != nil { + return nil, err + } + + return &config.Response{}, nil +} + +func (r *HSMReenroller) setControllerReferences() error { + if err := setControllerReferenceFor(r.Client, r.Instance, r.Scheme, fmt.Sprintf("ecert-%s-signcert", r.Instance.GetName()), false); err != nil { + return err + } + + if err := setControllerReferenceFor(r.Client, r.Instance, r.Scheme, fmt.Sprintf("ecert-%s-cacerts", r.Instance.GetName()), false); err != nil { + return err + } + + if err := setControllerReferenceFor(r.Client, r.Instance, r.Scheme, fmt.Sprintf("ecert-%s-intercerts", r.Instance.GetName()), true); err != nil { + return err + } + + return nil +} + +func setControllerReferenceFor(client k8sclient.Client, instance Instance, scheme *runtime.Scheme, name string, skipIfNotFound bool) error { + nn := types.NamespacedName{ + Name: name, + Namespace: instance.GetNamespace(), + } + + sec := &corev1.Secret{} + if err := client.Get(context.TODO(), nn, sec); err != nil { + if skipIfNotFound { + return nil + } + + return err + } + + if err := client.Update(context.TODO(), sec, k8sclient.UpdateOption{ + Owner: instance, + Scheme: scheme, + }); err != nil { + return errors.Wrapf(err, "failed to update secret '%s' with controller reference", instance.GetName()) + } + + return nil +} + +func createRootTLSSecret(client k8sclient.Client, instance Instance, scheme *runtime.Scheme, cert string) error { + tlsCertBytes, err := util.Base64ToBytes(cert) + if err != nil { + return err + } + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-init-roottls", instance.GetName()), + Namespace: instance.GetNamespace(), + }, 
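		// Consumed by initHSMJob, which mounts this secret's tlsCert.pem into the enroller
		// container at <HomeDir>/tlsCert.pem via the "tlscertfile" volume.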
+ Data: map[string][]byte{ + "tlsCert.pem": tlsCertBytes, + }, + } + + if err := client.Create(context.TODO(), secret, k8sclient.CreateOption{ + Owner: instance, + Scheme: scheme, + }); err != nil { + return errors.Wrap(err, "failed to create secret") + } + + return nil +} + +func deleteRootTLSSecret(client k8sclient.Client, instance Instance) error { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-init-roottls", instance.GetName()), + Namespace: instance.GetNamespace(), + }, + } + + if err := client.Delete(context.TODO(), secret); err != nil { + return errors.Wrap(err, "failed to delete secret") + } + + return nil +} + +func createCAClientConfig(client k8sclient.Client, instance Instance, scheme *runtime.Scheme, config *lib.ClientConfig) error { + configBytes, err := yaml.Marshal(config) + if err != nil { + return err + } + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-init-config", instance.GetName()), + Namespace: instance.GetNamespace(), + }, + BinaryData: map[string][]byte{ + "fabric-ca-client-config.yaml": configBytes, + }, + } + + if err := client.Create(context.TODO(), cm, k8sclient.CreateOption{ + Owner: instance, + Scheme: scheme, + }); err != nil { + return errors.Wrap(err, "failed to create config map") + } + + return nil +} + +func deleteCAClientConfig(k8sClient k8sclient.Client, instance Instance) error { + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-init-config", instance.GetName()), + Namespace: instance.GetNamespace(), + }, + } + + if err := k8sClient.Delete(context.TODO(), cm); client.IgnoreNotFound(err) != nil { + return errors.Wrap(err, "failed to delete confk8smap") + } + + return nil +} + +func (r *HSMReenroller) initHSMJob(instance Instance, hsmConfig *config.HSMConfig, timeout time.Duration) *jobv1.Job { + hsmLibraryPath := hsmConfig.Library.FilePath + hsmLibraryName := filepath.Base(hsmLibraryPath) + + jobName := fmt.Sprintf("%s-reenroll", instance.GetName()) + + f := false + user := int64(0) + backoffLimit := int32(0) + mountPath := "/shared" + + k8sJob := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: jobName, + Namespace: instance.GetNamespace(), + Labels: map[string]string{ + "name": jobName, + "owner": instance.GetName(), + }, + }, + Spec: batchv1.JobSpec{ + BackoffLimit: &backoffLimit, + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + ServiceAccountName: instance.GetName(), + ImagePullSecrets: util.AppendImagePullSecretIfMissing(instance.GetPullSecrets(), hsmConfig.BuildPullSecret()), + RestartPolicy: corev1.RestartPolicyNever, + InitContainers: []corev1.Container{ + corev1.Container{ + Name: "hsm-client", + Image: hsmConfig.Library.Image, + ImagePullPolicy: corev1.PullAlways, + Command: []string{ + "sh", + "-c", + fmt.Sprintf("mkdir -p %s/hsm && dst=\"%s/hsm/%s\" && echo \"Copying %s to ${dst}\" && mkdir -p $(dirname $dst) && cp -r %s $dst", mountPath, mountPath, hsmLibraryName, hsmLibraryPath, hsmLibraryPath), + }, + SecurityContext: &corev1.SecurityContext{ + RunAsUser: &user, + RunAsNonRoot: &f, + }, + VolumeMounts: []corev1.VolumeMount{ + corev1.VolumeMount{ + Name: "shared", + MountPath: mountPath, + }, + }, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("0.1"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + corev1.ResourceEphemeralStorage: resource.MustParse("100Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: 
resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("500Mi"), + corev1.ResourceEphemeralStorage: resource.MustParse("1Gi"), + }, + }, + }, + }, + Containers: []corev1.Container{ + corev1.Container{ + Name: "init", + Image: instance.EnrollerImage(), + ImagePullPolicy: corev1.PullAlways, + SecurityContext: &corev1.SecurityContext{ + RunAsUser: &user, + RunAsNonRoot: &f, + }, + Command: []string{ + "sh", + "-c", + fmt.Sprintf("/usr/local/bin/enroller node reenroll %s %s %s %s %s %s %s %s %s %t", r.HomeDir, "/tmp/fabric-ca-client-config.yaml", r.Config.CAHost, r.Config.CAPort, r.Config.CAName, instance.GetName(), instance.GetNamespace(), r.Config.EnrollID, fmt.Sprintf("%s/cert.pem", r.HomeDir), r.NewKey), + }, + VolumeMounts: []corev1.VolumeMount{ + corev1.VolumeMount{ + Name: "tlscertfile", + MountPath: fmt.Sprintf("%s/tlsCert.pem", r.HomeDir), + SubPath: "tlsCert.pem", + }, + corev1.VolumeMount{ + Name: "certfile", + MountPath: fmt.Sprintf("%s/cert.pem", r.HomeDir), + SubPath: "cert.pem", + }, + corev1.VolumeMount{ + Name: "clientconfig", + MountPath: fmt.Sprintf("/tmp/%s", "fabric-ca-client-config.yaml"), + SubPath: "fabric-ca-client-config.yaml", + }, + corev1.VolumeMount{ + Name: "shared", + MountPath: "/hsm/lib", + SubPath: "hsm", + }, + }, + }, + }, + Volumes: []corev1.Volume{ + corev1.Volume{ + Name: "shared", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{ + Medium: corev1.StorageMediumMemory, + }, + }, + }, + corev1.Volume{ + Name: "tlscertfile", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: fmt.Sprintf("%s-init-roottls", instance.GetName()), + }, + }, + }, + corev1.Volume{ + Name: "certfile", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: fmt.Sprintf("ecert-%s-signcert", instance.GetName()), + }, + }, + }, + corev1.Volume{ + Name: "clientconfig", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: fmt.Sprintf("%s-init-config", instance.GetName()), + }, + }, + }, + }, + }, + }, + }, + }, + } + + job := jobv1.New(k8sJob, &jobv1.Timeouts{ + WaitUntilActive: timeout, + WaitUntilFinished: timeout, + }) + + job.Spec.Template.Spec.Volumes = append(job.Spec.Template.Spec.Volumes, hsmConfig.GetVolumes()...) + job.Spec.Template.Spec.Containers[0].VolumeMounts = append(job.Spec.Template.Spec.Containers[0].VolumeMounts, hsmConfig.GetVolumeMounts()...) + + return job +} diff --git a/pkg/certificate/reenroller/mocks/identity.go b/pkg/certificate/reenroller/mocks/identity.go new file mode 100644 index 00000000..4366e9c4 --- /dev/null +++ b/pkg/certificate/reenroller/mocks/identity.go @@ -0,0 +1,249 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package mocks + +import ( + "sync" + + "github.com/IBM-Blockchain/fabric-operator/pkg/certificate/reenroller" + "github.com/hyperledger/fabric-ca/api" + "github.com/hyperledger/fabric-ca/lib" + "github.com/hyperledger/fabric-ca/lib/client/credential/x509" +) + +type Identity struct { + GetClientStub func() *lib.Client + getClientMutex sync.RWMutex + getClientArgsForCall []struct { + } + getClientReturns struct { + result1 *lib.Client + } + getClientReturnsOnCall map[int]struct { + result1 *lib.Client + } + GetECertStub func() *x509.Signer + getECertMutex sync.RWMutex + getECertArgsForCall []struct { + } + getECertReturns struct { + result1 *x509.Signer + } + getECertReturnsOnCall map[int]struct { + result1 *x509.Signer + } + ReenrollStub func(*api.ReenrollmentRequest) (*lib.EnrollmentResponse, error) + reenrollMutex sync.RWMutex + reenrollArgsForCall []struct { + arg1 *api.ReenrollmentRequest + } + reenrollReturns struct { + result1 *lib.EnrollmentResponse + result2 error + } + reenrollReturnsOnCall map[int]struct { + result1 *lib.EnrollmentResponse + result2 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *Identity) GetClient() *lib.Client { + fake.getClientMutex.Lock() + ret, specificReturn := fake.getClientReturnsOnCall[len(fake.getClientArgsForCall)] + fake.getClientArgsForCall = append(fake.getClientArgsForCall, struct { + }{}) + stub := fake.GetClientStub + fakeReturns := fake.getClientReturns + fake.recordInvocation("GetClient", []interface{}{}) + fake.getClientMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Identity) GetClientCallCount() int { + fake.getClientMutex.RLock() + defer fake.getClientMutex.RUnlock() + return len(fake.getClientArgsForCall) +} + +func (fake *Identity) GetClientCalls(stub func() *lib.Client) { + fake.getClientMutex.Lock() + defer fake.getClientMutex.Unlock() + fake.GetClientStub = stub +} + +func (fake *Identity) GetClientReturns(result1 *lib.Client) { + fake.getClientMutex.Lock() + defer fake.getClientMutex.Unlock() + fake.GetClientStub = nil + fake.getClientReturns = struct { + result1 *lib.Client + }{result1} +} + +func (fake *Identity) GetClientReturnsOnCall(i int, result1 *lib.Client) { + fake.getClientMutex.Lock() + defer fake.getClientMutex.Unlock() + fake.GetClientStub = nil + if fake.getClientReturnsOnCall == nil { + fake.getClientReturnsOnCall = make(map[int]struct { + result1 *lib.Client + }) + } + fake.getClientReturnsOnCall[i] = struct { + result1 *lib.Client + }{result1} +} + +func (fake *Identity) GetECert() *x509.Signer { + fake.getECertMutex.Lock() + ret, specificReturn := fake.getECertReturnsOnCall[len(fake.getECertArgsForCall)] + fake.getECertArgsForCall = append(fake.getECertArgsForCall, struct { + }{}) + stub := fake.GetECertStub + fakeReturns := fake.getECertReturns + fake.recordInvocation("GetECert", []interface{}{}) + fake.getECertMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Identity) GetECertCallCount() int { + fake.getECertMutex.RLock() + defer fake.getECertMutex.RUnlock() + return len(fake.getECertArgsForCall) +} + +func (fake *Identity) GetECertCalls(stub func() *x509.Signer) { + fake.getECertMutex.Lock() + defer fake.getECertMutex.Unlock() + fake.GetECertStub = stub +} + +func (fake *Identity) GetECertReturns(result1 *x509.Signer) { + fake.getECertMutex.Lock() + defer 
fake.getECertMutex.Unlock() + fake.GetECertStub = nil + fake.getECertReturns = struct { + result1 *x509.Signer + }{result1} +} + +func (fake *Identity) GetECertReturnsOnCall(i int, result1 *x509.Signer) { + fake.getECertMutex.Lock() + defer fake.getECertMutex.Unlock() + fake.GetECertStub = nil + if fake.getECertReturnsOnCall == nil { + fake.getECertReturnsOnCall = make(map[int]struct { + result1 *x509.Signer + }) + } + fake.getECertReturnsOnCall[i] = struct { + result1 *x509.Signer + }{result1} +} + +func (fake *Identity) Reenroll(arg1 *api.ReenrollmentRequest) (*lib.EnrollmentResponse, error) { + fake.reenrollMutex.Lock() + ret, specificReturn := fake.reenrollReturnsOnCall[len(fake.reenrollArgsForCall)] + fake.reenrollArgsForCall = append(fake.reenrollArgsForCall, struct { + arg1 *api.ReenrollmentRequest + }{arg1}) + stub := fake.ReenrollStub + fakeReturns := fake.reenrollReturns + fake.recordInvocation("Reenroll", []interface{}{arg1}) + fake.reenrollMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *Identity) ReenrollCallCount() int { + fake.reenrollMutex.RLock() + defer fake.reenrollMutex.RUnlock() + return len(fake.reenrollArgsForCall) +} + +func (fake *Identity) ReenrollCalls(stub func(*api.ReenrollmentRequest) (*lib.EnrollmentResponse, error)) { + fake.reenrollMutex.Lock() + defer fake.reenrollMutex.Unlock() + fake.ReenrollStub = stub +} + +func (fake *Identity) ReenrollArgsForCall(i int) *api.ReenrollmentRequest { + fake.reenrollMutex.RLock() + defer fake.reenrollMutex.RUnlock() + argsForCall := fake.reenrollArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Identity) ReenrollReturns(result1 *lib.EnrollmentResponse, result2 error) { + fake.reenrollMutex.Lock() + defer fake.reenrollMutex.Unlock() + fake.ReenrollStub = nil + fake.reenrollReturns = struct { + result1 *lib.EnrollmentResponse + result2 error + }{result1, result2} +} + +func (fake *Identity) ReenrollReturnsOnCall(i int, result1 *lib.EnrollmentResponse, result2 error) { + fake.reenrollMutex.Lock() + defer fake.reenrollMutex.Unlock() + fake.ReenrollStub = nil + if fake.reenrollReturnsOnCall == nil { + fake.reenrollReturnsOnCall = make(map[int]struct { + result1 *lib.EnrollmentResponse + result2 error + }) + } + fake.reenrollReturnsOnCall[i] = struct { + result1 *lib.EnrollmentResponse + result2 error + }{result1, result2} +} + +func (fake *Identity) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.getClientMutex.RLock() + defer fake.getClientMutex.RUnlock() + fake.getECertMutex.RLock() + defer fake.getECertMutex.RUnlock() + fake.reenrollMutex.RLock() + defer fake.reenrollMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *Identity) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ reenroller.Identity = new(Identity) diff --git a/pkg/certificate/reenroller/reenroller.go b/pkg/certificate/reenroller/reenroller.go new file mode 100644 index 00000000..fc8ef0ae --- 
/dev/null +++ b/pkg/certificate/reenroller/reenroller.go @@ -0,0 +1,392 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package reenroller + +import ( + "crypto/ecdsa" + "crypto/x509" + "encoding/hex" + "encoding/pem" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "time" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + commonapi "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + "github.com/hyperledger/fabric-ca/api" + "github.com/hyperledger/fabric-ca/lib" + "github.com/hyperledger/fabric-ca/lib/client/credential" + fabricx509 "github.com/hyperledger/fabric-ca/lib/client/credential/x509" + "github.com/hyperledger/fabric-ca/lib/tls" + "github.com/hyperledger/fabric/bccsp" + "github.com/hyperledger/fabric/bccsp/utils" + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/util/wait" + + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var log = logf.Log.WithName("reenroller") + +//go:generate counterfeiter -o mocks/identity.go -fake-name Identity . Identity +type Identity interface { + Reenroll(req *api.ReenrollmentRequest) (*lib.EnrollmentResponse, error) + GetECert() *fabricx509.Signer + GetClient() *lib.Client +} + +type Reenroller struct { + Client *lib.Client + Identity Identity + + HomeDir string + Config *current.Enrollment + BCCSP bool + Timeout time.Duration + NewKey bool +} + +func New(cfg *current.Enrollment, homeDir string, bccsp *commonapi.BCCSP, timeoutstring string, newKey bool) (*Reenroller, error) { + if cfg == nil { + return nil, errors.New("unable to reenroll, Enrollment config must be passed") + } + + err := EnrollmentConfigValidation(cfg) + if err != nil { + return nil, err + } + + client := &lib.Client{ + HomeDir: homeDir, + Config: &lib.ClientConfig{ + TLS: tls.ClientTLSConfig{ + Enabled: true, + CertFiles: []string{"tlsCert.pem"}, + }, + URL: fmt.Sprintf("https://%s:%s", cfg.CAHost, cfg.CAPort), + }, + } + + client = GetClient(client, bccsp) + + timeout, err := time.ParseDuration(timeoutstring) + if err != nil || timeoutstring == "" { + timeout = time.Duration(60 * time.Second) + } + + r := &Reenroller{ + Client: client, + HomeDir: homeDir, + Config: cfg.DeepCopy(), + Timeout: timeout, + NewKey: newKey, + } + + if bccsp != nil { + r.BCCSP = true + } + + return r, nil +} + +func (r *Reenroller) InitClient() error { + if !r.IsCAReachable() { + return errors.New("unable to init client for re-enroll, CA is not reachable") + } + + tlsCertBytes, err := util.Base64ToBytes(r.Config.CATLS.CACert) + if err != nil { + return err + } + err = os.MkdirAll(r.HomeDir, 0750) + if err != nil { + return err + } + + err = util.WriteFile(filepath.Join(r.HomeDir, "tlsCert.pem"), tlsCertBytes, 0755) + if err != nil { + return err + } + + err = r.Client.Init() + if err != nil { + return 
errors.Wrap(err, "failed to initialize CA client") + } + return nil +} + +func (r *Reenroller) loadHSMIdentity(certPemBytes []byte) error { + log.Info("Loading HSM based identity...") + + csp := r.Client.GetCSP() + certPubK, err := r.Client.GetCSP().KeyImport(certPemBytes, &bccsp.X509PublicKeyImportOpts{Temporary: true}) + if err != nil { + return err + } + + // Get the key given the SKI value + ski := certPubK.SKI() + privateKey, err := csp.GetKey(ski) + if err != nil { + return errors.WithMessage(err, "could not find matching private key for SKI") + } + + // BCCSP returns a public key if the private key for the SKI wasn't found, so + // we need to return an error in that case. + if !privateKey.Private() { + return errors.Errorf("The private key associated with the certificate with SKI '%s' was not found", hex.EncodeToString(ski)) + } + + signer, err := fabricx509.NewSigner(privateKey, certPemBytes) + if err != nil { + return err + } + + cred := fabricx509.NewCredential("", "", r.Client) + err = cred.SetVal(signer) + if err != nil { + return err + } + + r.Identity = lib.NewIdentity(r.Client, r.Config.EnrollID, []credential.Credential{cred}) + + return nil +} + +func (r *Reenroller) loadIdentity(certPemBytes []byte, keyPemBytes []byte) error { + log.Info("Loading software based identity...") + + client := r.Client + enrollmentID := r.Config.EnrollID + + // NOTE: Utilized code from https://github.com/hyperledger/fabric-ca/blob/v2.0.0-alpha/util/csp.go#L220 + // but modified to use pem bytes instead of file since we store the key in a secret, not in filesystem + var bccspKey bccsp.Key + temporary := true + key, err := utils.PEMtoPrivateKey(keyPemBytes, nil) + if err != nil { + return errors.Wrap(err, "failed to get private key from pem bytes") + } + switch key.(type) { + case *ecdsa.PrivateKey: + priv, err := utils.PrivateKeyToDER(key.(*ecdsa.PrivateKey)) + if err != nil { + return errors.Wrap(err, "failed to marshal ECDSA private key to der") + } + bccspKey, err = client.GetCSP().KeyImport(priv, &bccsp.ECDSAPrivateKeyImportOpts{Temporary: temporary}) + if err != nil { + return errors.Wrap(err, "failed to import ECDSA private key") + } + default: + return errors.New("failed to import key, invalid secret key type") + } + + signer, err := fabricx509.NewSigner(bccspKey, certPemBytes) + if err != nil { + return err + } + + cred := fabricx509.NewCredential("", "", client) + err = cred.SetVal(signer) + if err != nil { + return err + } + + r.Identity = lib.NewIdentity(client, enrollmentID, []credential.Credential{cred}) + + return nil +} + +func (r *Reenroller) LoadIdentity(certPemBytes []byte, keyPemBytes []byte, hsmEnabled bool) error { + if hsmEnabled { + err := r.loadHSMIdentity(certPemBytes) + if err != nil { + return errors.Wrap(err, "failed to load HSM based identity") + } + + return nil + } + + err := r.loadIdentity(certPemBytes, keyPemBytes) + if err != nil { + return errors.Wrap(err, "failed to load identity") + } + + return nil +} + +func (r *Reenroller) IsCAReachable() bool { + timeout := r.Timeout + url := fmt.Sprintf("https://%s:%s/cainfo", r.Config.CAHost, r.Config.CAPort) + + // Convert TLS certificate from base64 to file + tlsCertBytes, err := util.Base64ToBytes(r.Config.CATLS.CACert) + if err != nil { + log.Error(err, "Cannot convert TLS Certificate from base64") + return false + } + + err = wait.Poll(500*time.Millisecond, timeout, func() (bool, error) { + err = util.HealthCheck(url, tlsCertBytes, timeout) + if err == nil { + return true, nil + } + return false, nil + }) + if err 
!= nil { + log.Error(err, "Health check failed") + return false + } + + return true +} + +func (r *Reenroller) Reenroll() (*config.Response, error) { + reuseKey := true + if r.NewKey { + reuseKey = false + } + + reenrollReq := &api.ReenrollmentRequest{ + CAName: r.Config.CAName, + CSR: &api.CSRInfo{ + KeyRequest: &api.KeyRequest{ + ReuseKey: reuseKey, + }, + }, + } + + if r.Config.CSR != nil && len(r.Config.CSR.Hosts) > 0 { + reenrollReq.CSR.Hosts = r.Config.CSR.Hosts + } + + log.Info(fmt.Sprintf("Re-enrolling with CA '%s' with request %+v, csr %+v", r.Config.CAHost, reenrollReq, reenrollReq.CSR)) + + reenrollResp, err := r.Identity.Reenroll(reenrollReq) + if err != nil { + return nil, errors.Wrap(err, "failed to re-enroll with CA") + } + + newIdentity := reenrollResp.Identity + + resp := &config.Response{} + resp.SignCert = newIdentity.GetECert().Cert() + + // Only need to read key if a new key is being generated, which does not happen + // if the reenroll request has "ReuseKey" set to true + if !reuseKey { + key, err := r.ReadKey() + if err != nil { + return nil, err + } + resp.Keystore = key + } + + // NOTE: Delete the keystore file here; otherwise a leftover key file would cause + // ReadKey() to find more than one file the next time the certificate is renewed. + err = r.DeleteKeystoreFile() + if err != nil { + return nil, err + } + + // TODO: Currently not parsing the reenroll response to get CACerts and + // Intermediate Certs again (like we do when initially enrolling with CA) + // as those certs shouldn't need to be updated + + return resp, nil +} + +func (r *Reenroller) ReadKey() ([]byte, error) { + if r.BCCSP { + return nil, nil + } + + keystoreDir := filepath.Join(r.HomeDir, "msp", "keystore") + + files, err := ioutil.ReadDir(keystoreDir) + if err != nil { + return nil, err + } + + if len(files) > 1 { + return nil, errors.Errorf("expecting only one key file to be present in keystore '%s', but found multiple", keystoreDir) + } + + for _, file := range files { + fileBytes, err := ioutil.ReadFile(filepath.Clean(filepath.Join(keystoreDir, file.Name()))) + if err != nil { + return nil, err + } + + block, _ := pem.Decode(fileBytes) + if block == nil { + continue + } + + _, err = x509.ParsePKCS8PrivateKey(block.Bytes) + if err == nil { + return fileBytes, nil + } + } + + return nil, errors.Errorf("failed to read private key from dir '%s'", keystoreDir) +} + +func (r *Reenroller) DeleteKeystoreFile() error { + keystoreDir := filepath.Join(r.HomeDir, "msp", "keystore") + + files, err := ioutil.ReadDir(keystoreDir) + if err != nil { + return err + } + + for _, file := range files { + err = os.Remove(filepath.Join(keystoreDir, file.Name())) + if err != nil { + return errors.Wrapf(err, "failed to delete file from keystore directory '%s'", keystoreDir) + } + } + + return nil +} + +func EnrollmentConfigValidation(enrollConfig *current.Enrollment) error { + if enrollConfig.CAHost == "" { + return errors.New("unable to reenroll, CA host not specified") + } + + if enrollConfig.CAPort == "" { + return errors.New("unable to reenroll, CA port not specified") + } + + if enrollConfig.EnrollID == "" { + return errors.New("unable to reenroll, enrollment ID not specified") + } + + if enrollConfig.CATLS.CACert == "" { + return errors.New("unable to reenroll, CA TLS certificate not specified") + } + + return nil +} diff --git a/pkg/certificate/reenroller/reenroller_suite_test.go b/pkg/certificate/reenroller/reenroller_suite_test.go new file mode 100644 index 
00000000..2ba87ff9 --- /dev/null +++ b/pkg/certificate/reenroller/reenroller_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package reenroller_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestReenroller(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Reenroller Suite") +} diff --git a/pkg/certificate/reenroller/reenroller_test.go b/pkg/certificate/reenroller/reenroller_test.go new file mode 100644 index 00000000..956c33b4 --- /dev/null +++ b/pkg/certificate/reenroller/reenroller_test.go @@ -0,0 +1,221 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package reenroller_test + +import ( + "encoding/pem" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "path/filepath" + "time" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/certificate/reenroller" + "github.com/IBM-Blockchain/fabric-operator/pkg/certificate/reenroller/mocks" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + "github.com/hyperledger/fabric-ca/lib" + "github.com/hyperledger/fabric-ca/lib/client/credential" + fabricx509 "github.com/hyperledger/fabric-ca/lib/client/credential/x509" + "github.com/hyperledger/fabric-ca/lib/tls" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/pkg/errors" +) + +const ( + homeDir = "test-reenroller-dir" + testkey = "LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JR0hBZ0VBTUJNR0J5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEJHMHdhd0lCQVFRZ3hRUXdSVFFpVUcwREo1UHoKQTJSclhIUEtCelkxMkxRa0MvbVlveWo1bEhDaFJBTkNBQVN5bE1YLzFqdDlmUGt1RTZ0anpvSTlQbGt4LzZuVQpCMHIvMU56TTdrYnBjUk8zQ3RIeXQ2TXlQR21FOUZUN29pYXphU3J1TW9JTDM0VGdBdUpIOU9ZWQotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg==" +) + +var _ = Describe("Reenroller", func() { + var ( + err error + + testReenroller *reenroller.Reenroller + config *current.Enrollment + mockIdentity *mocks.Identity + + server *httptest.Server + serverURL string + serverCert string + serverUrlObj *url.URL + ) + + BeforeSuite(func() { + // Start a local HTTP server + server = httptest.NewTLSServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + // Test request parameters + Expect(req.URL.String()).To(Equal("/cainfo")) + return + })) + + serverURL = server.URL + rawCert := server.Certificate().Raw + pemCert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: rawCert}) + serverCert = string(util.BytesToBase64(pemCert)) + + urlObj, err := url.Parse(serverURL) + Expect(err).NotTo(HaveOccurred()) + serverUrlObj = urlObj + + // Generate temporary key for reenroll test + keystorePath := filepath.Join(homeDir, "msp", "keystore") + err = os.MkdirAll(keystorePath, 0755) + Expect(err).NotTo(HaveOccurred()) + + key, err := util.Base64ToBytes(testkey) + Expect(err).NotTo(HaveOccurred()) + err = ioutil.WriteFile(filepath.Join(keystorePath, "key.pem"), key, 0755) + }) + + BeforeEach(func() { + mockIdentity = &mocks.Identity{} + + config = ¤t.Enrollment{ + CAHost: serverUrlObj.Hostname(), + CAPort: serverUrlObj.Port(), + EnrollID: "admin", + EnrollSecret: "adminpw", + CATLS: ¤t.CATLS{ + CACert: serverCert, + }, + CSR: ¤t.CSR{ + Hosts: []string{"csrhost"}, + }, + } + + client := &lib.Client{ + HomeDir: homeDir, + Config: &lib.ClientConfig{ + TLS: tls.ClientTLSConfig{ + Enabled: true, + CertFiles: []string{"tlsCert.pem"}, + }, + URL: fmt.Sprintf("https://%s:%s", config.CAHost, config.CAPort), + }, + } + + timeout, _ := time.ParseDuration("10s") + testReenroller = &reenroller.Reenroller{ + Client: client, + Identity: mockIdentity, + Config: config, + HomeDir: homeDir, + Timeout: timeout, + } + + signer := &fabricx509.Signer{} + cred := &fabricx509.Credential{} + cred.SetVal(signer) + mockIdentity.ReenrollReturns(&lib.EnrollmentResponse{ + Identity: lib.NewIdentity(&lib.Client{}, "caIdentity", []credential.Credential{cred}), + }, nil) + }) + + AfterSuite(func() { + // Close the server when test finishes + server.Close() + + err = os.RemoveAll(homeDir) + Expect(err).NotTo(HaveOccurred()) + }) + + Context("Enrollment configuration validation", func() { + It("returns an error if missing CA host", func() { + config.CAHost = "" + _, err = reenroller.New(config, homeDir, nil, "", true) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("unable to reenroll, CA host not specified")) + }) + + It("returns an error if missing CA Port", func() { + config.CAPort = "" + _, err = reenroller.New(config, homeDir, nil, "", true) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("unable to reenroll, CA port not specified")) + }) + + It("returns an error if missing enrollment ID", func() { + config.EnrollID = "" + _, err = reenroller.New(config, homeDir, nil, "", true) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("unable to reenroll, enrollment ID not specified")) + }) + + 
It("returns an error if missing TLS cert", func() { + config.CATLS.CACert = "" + _, err = reenroller.New(config, homeDir, nil, "", true) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("unable to reenroll, CA TLS certificate not specified")) + }) + }) + + Context("test avaialability of CA", func() { + It("returns false if CA is not reachable", func() { + timeout, _ := time.ParseDuration("0.5s") + testReenroller.Timeout = timeout + testReenroller.Config.CAHost = "unreachable.test" + reachable := testReenroller.IsCAReachable() + Expect(reachable).To(BeFalse()) + }) + It("returns true if CA is reachable", func() { + timeout, _ := time.ParseDuration("0.5s") + testReenroller.Timeout = timeout + testReenroller.Config.CAHost = serverUrlObj.Hostname() + testReenroller.Config.CAPort = serverUrlObj.Port() + testReenroller.Config.CATLS.CACert = serverCert + reachable := testReenroller.IsCAReachable() + Expect(reachable).To(BeTrue()) + }) + }) + + Context("init client", func() { + It("returns an error if failed to initialize CA client", func() { + testReenroller.Config.CATLS.CACert = "" + err = testReenroller.InitClient() + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("unable to init client for re-enroll, CA is not reachable")) + }) + + It("returns initializes CA client", func() { + err = testReenroller.InitClient() + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("reenrolls with CA", func() { + + It("returns an error if reenrollment with CA fails", func() { + mockIdentity.ReenrollReturns(nil, errors.New("bad reenrollment")) + _, err = testReenroller.Reenroll() + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to re-enroll with CA: bad reenrollment")) + }) + + It("reenrolls with CA for new certificate", func() { + _, err = testReenroller.Reenroll() + Expect(err).NotTo(HaveOccurred()) + }) + }) + +}) diff --git a/pkg/client/client.go b/pkg/client/client.go new file mode 100644 index 00000000..0894fa46 --- /dev/null +++ b/pkg/client/client.go @@ -0,0 +1,74 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package client + +import ( + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/rest" +) + +const ( + CRDGroup string = "ibp.com" + CRDVersion string = "v1beta1" +) + +var SchemeGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + +type IBPClient struct { + rest.Interface +} + +func New(cfg *rest.Config) (*IBPClient, error) { + scheme := runtime.NewScheme() + SchemeBuilder := runtime.NewSchemeBuilder(addKnownTypes) + err := SchemeBuilder.AddToScheme(scheme) + if err != nil { + return nil, err + } + + config := *cfg + config.GroupVersion = &SchemeGroupVersion + config.APIPath = "/apis" + config.ContentType = runtime.ContentTypeJSON + config.NegotiatedSerializer = serializer.NewCodecFactory(scheme) + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &IBPClient{client}, nil +} + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + ¤t.IBPCA{}, + ¤t.IBPCAList{}, + ¤t.IBPPeer{}, + ¤t.IBPPeerList{}, + ¤t.IBPOrderer{}, + ¤t.IBPOrdererList{}, + ¤t.IBPConsole{}, + ¤t.IBPConsoleList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/pkg/client/client_suite_test.go b/pkg/client/client_suite_test.go new file mode 100644 index 00000000..101c1adf --- /dev/null +++ b/pkg/client/client_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package client_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestClient(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Client Suite") +} diff --git a/pkg/client/client_test.go b/pkg/client/client_test.go new file mode 100644 index 00000000..ba1e13be --- /dev/null +++ b/pkg/client/client_test.go @@ -0,0 +1,34 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package client + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "k8s.io/client-go/rest" +) + +var _ = Describe("Client", func() { + It("creates a client", func() { + restConfig := &rest.Config{} + client, err := New(restConfig) + Expect(err).NotTo(HaveOccurred()) + Expect(client).NotTo(BeNil()) + }) +}) diff --git a/pkg/command/command_suite_test.go b/pkg/command/command_suite_test.go new file mode 100644 index 00000000..d4e797d5 --- /dev/null +++ b/pkg/command/command_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package command_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestCommand(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Command Suite") +} diff --git a/pkg/command/crdinstall.go b/pkg/command/crdinstall.go new file mode 100644 index 00000000..a89ff81b --- /dev/null +++ b/pkg/command/crdinstall.go @@ -0,0 +1,60 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package command + +import ( + "github.com/IBM-Blockchain/fabric-operator/pkg/crd" + "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/clientset" + "github.com/pkg/errors" + "k8s.io/client-go/rest" +) + +func CRDInstall(dir string) error { + config, err := rest.InClusterConfig() + if err != nil { + return errors.Wrap(err, "failed to get cluster config") + } + + err = CRDInstallUsingConfig(config, dir) + if err != nil { + return errors.Wrap(err, "failed to install CRDs") + } + + return nil +} + +func CRDInstallUsingConfig(config *rest.Config, dir string) error { + clientSet, err := clientset.New(config) + if err != nil { + return errors.Wrap(err, "failed to get client") + } + + crds := crd.GetCRDListFromDir(dir) + manager, err := crd.NewManager(clientSet, crds...) + if err != nil { + return errors.Wrap(err, "failed to create CRD manager") + } + + err = manager.Create() + if err != nil { + return errors.Wrap(err, "failed to create CRDs") + } + + return nil +} diff --git a/pkg/command/mocks/reader.go b/pkg/command/mocks/reader.go new file mode 100644 index 00000000..b1bb1d0c --- /dev/null +++ b/pkg/command/mocks/reader.go @@ -0,0 +1,196 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package mocks + +import ( + "context" + "sync" + + "github.com/IBM-Blockchain/fabric-operator/pkg/command" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type Reader struct { + GetStub func(context.Context, types.NamespacedName, client.Object) error + getMutex sync.RWMutex + getArgsForCall []struct { + arg1 context.Context + arg2 types.NamespacedName + arg3 client.Object + } + getReturns struct { + result1 error + } + getReturnsOnCall map[int]struct { + result1 error + } + ListStub func(context.Context, client.ObjectList, ...client.ListOption) error + listMutex sync.RWMutex + listArgsForCall []struct { + arg1 context.Context + arg2 client.ObjectList + arg3 []client.ListOption + } + listReturns struct { + result1 error + } + listReturnsOnCall map[int]struct { + result1 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *Reader) Get(arg1 context.Context, arg2 types.NamespacedName, arg3 client.Object) error { + fake.getMutex.Lock() + ret, specificReturn := fake.getReturnsOnCall[len(fake.getArgsForCall)] + fake.getArgsForCall = append(fake.getArgsForCall, struct { + arg1 context.Context + arg2 types.NamespacedName + arg3 client.Object + }{arg1, arg2, arg3}) + stub := fake.GetStub + fakeReturns := fake.getReturns + fake.recordInvocation("Get", []interface{}{arg1, arg2, arg3}) + fake.getMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Reader) GetCallCount() int { + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + return len(fake.getArgsForCall) +} + +func (fake *Reader) GetCalls(stub func(context.Context, types.NamespacedName, client.Object) error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = stub +} + +func (fake *Reader) GetArgsForCall(i int) (context.Context, types.NamespacedName, client.Object) { + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + argsForCall := fake.getArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Reader) GetReturns(result1 error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = nil + fake.getReturns = struct { + result1 error + }{result1} +} + +func (fake *Reader) GetReturnsOnCall(i int, result1 error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = nil + if fake.getReturnsOnCall == nil { + fake.getReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.getReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Reader) List(arg1 context.Context, arg2 client.ObjectList, arg3 ...client.ListOption) error { + fake.listMutex.Lock() + ret, specificReturn := fake.listReturnsOnCall[len(fake.listArgsForCall)] + fake.listArgsForCall = append(fake.listArgsForCall, struct { + arg1 context.Context + arg2 client.ObjectList + arg3 []client.ListOption + }{arg1, arg2, arg3}) + stub := fake.ListStub + fakeReturns := fake.listReturns + fake.recordInvocation("List", []interface{}{arg1, arg2, arg3}) + fake.listMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Reader) ListCallCount() int { + fake.listMutex.RLock() + defer fake.listMutex.RUnlock() + return len(fake.listArgsForCall) +} + +func (fake *Reader) ListCalls(stub func(context.Context, client.ObjectList, ...client.ListOption) error) { + fake.listMutex.Lock() + defer fake.listMutex.Unlock() + fake.ListStub = stub +} + +func (fake *Reader) ListArgsForCall(i int) (context.Context, client.ObjectList, []client.ListOption) { + fake.listMutex.RLock() + defer fake.listMutex.RUnlock() + argsForCall := fake.listArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Reader) ListReturns(result1 error) { + fake.listMutex.Lock() + defer fake.listMutex.Unlock() + fake.ListStub = nil + fake.listReturns = struct { + result1 error + }{result1} +} + +func (fake *Reader) ListReturnsOnCall(i int, result1 error) { + fake.listMutex.Lock() + defer fake.listMutex.Unlock() + fake.ListStub = nil + if fake.listReturnsOnCall == nil { + fake.listReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.listReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Reader) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + fake.listMutex.RLock() + defer fake.listMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *Reader) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ command.Reader = new(Reader) diff --git a/pkg/command/operator.go b/pkg/command/operator.go new file mode 100644 index 00000000..1575b821 --- /dev/null +++ b/pkg/command/operator.go @@ -0,0 +1,293 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package command + +import ( + "context" + "flag" + "fmt" + "os" + "runtime" + "time" + + k8sruntime "k8s.io/apimachinery/pkg/runtime" + + routev1 "github.com/openshift/api/route/v1" + "github.com/operator-framework/operator-lib/leader" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + apis "github.com/IBM-Blockchain/fabric-operator/api" + ibpv1beta1 "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + controller "github.com/IBM-Blockchain/fabric-operator/controllers" + oconfig "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + "github.com/IBM-Blockchain/fabric-operator/pkg/migrator" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering" + openshiftv1 "github.com/openshift/api/config/v1" + + "k8s.io/apimachinery/pkg/types" + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + "sigs.k8s.io/controller-runtime/pkg/client" + + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/manager/signals" +) + +var log = logf.Log.WithName("cmd_operator") + +var ( + scheme = k8sruntime.NewScheme() + setupLog = ctrl.Log.WithName("setup") +) + +func init() { + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + utilruntime.Must(ibpv1beta1.AddToScheme(scheme)) + // +kubebuilder:scaffold:scheme +} + +func printVersion() { + log.Info(fmt.Sprintf("Go Version: %s", runtime.Version())) + log.Info(fmt.Sprintf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH)) +} + +func Operator(operatorCfg *oconfig.Config, blocking bool) error { + signalHandler := signals.SetupSignalHandler() + return OperatorWithSignal(operatorCfg, signalHandler, blocking, false) +} + +func OperatorWithSignal(operatorCfg *oconfig.Config, signalHandler context.Context, blocking, local bool) error { + var err error + + // Add the zap logger flag set to the CLI. The flag set must + // be added before calling pflag.Parse(). + // pflag.CommandLine.AddFlagSet(flagset) + + // Add flags registered by imported packages (e.g. glog and + // controller-runtime) + // pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + // pflag.Parse() + + // Use a zap logr.Logger implementation. If none of the zap + // flags are configured (or if the zap flag set is not being + // used), this defaults to a production zap logger. + // + // The logger instantiated here can be changed to any logger + // implementing the logr.Logger interface. This logger will + // be propagated through the whole operator, generating + // uniform and structured logs. 
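+ // A logger supplied via the operator config takes precedence; otherwise the default zap loggers below are used.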
+ if operatorCfg.Logger != nil { + logf.SetLogger(*operatorCfg.Logger) + ctrl.SetLogger(*operatorCfg.Logger) + } else { + logf.SetLogger(zap.New()) + ctrl.SetLogger(zap.New(zap.UseDevMode(true))) + } + + printVersion() + + watchNamespace := os.Getenv("WATCH_NAMESPACE") + var operatorNamespace string + if watchNamespace == "" { + // Operator is running in all namespace mode + log.Info("Installing operator in all namespace mode") + operatorNamespace, err = GetOperatorNamespace() + if err != nil { + log.Error(err, "Failed to get operator namespace") + time.Sleep(15 * time.Second) + return err + } + } else { + log.Info("Installing operator in own namespace mode") + operatorNamespace = watchNamespace + } + + if !local { + label := os.Getenv("OPERATOR_LABEL_PREFIX") + if label == "" { + label = "fabric" + } + err = leader.Become(context.TODO(), label+"-operator-lock") + if err != nil { + log.Error(err, "Failed to retry for leader lock") + os.Exit(1) + } + } else { + log.Info("local run detected, skipping leader election") + } + + var metricsAddr string + var enableLeaderElection bool + + if flag.Lookup("metrics-addr") == nil { + flag.StringVar(&metricsAddr, "metrics-addr", ":8383", "The address the metric endpoint binds to.") + } + if flag.Lookup("enable-leader-election") == nil { + flag.BoolVar(&enableLeaderElection, "enable-leader-election", true, + "Enable leader election for controller manager. "+ + "Enabling this will ensure there is only one active controller manager.") + } + flag.Parse() + + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ + Scheme: scheme, + MetricsBindAddress: metricsAddr, + Port: 9443, + // LeaderElection: enableLeaderElection, + LeaderElectionID: "c30dd930.ibp.com", + LeaderElectionNamespace: operatorNamespace, + Namespace: watchNamespace, + }) + if err != nil { + setupLog.Error(err, "unable to start manager") + return err + } + + log.Info("Registering Components.") + + // Setup Scheme for all resources + if err := apis.AddToScheme(mgr.GetScheme()); err != nil { + log.Error(err, "") + return err + } + + //Add route scheme + if err := routev1.AddToScheme(mgr.GetScheme()); err != nil { + log.Error(err, "") + return err + } + + //Add clusterversion scheme + if err := openshiftv1.AddToScheme(mgr.GetScheme()); err != nil { + log.Error(err, "") + return err + } + + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + utilruntime.Must(ibpv1beta1.AddToScheme(scheme)) + + go func() { + runtime.Gosched() + mgrSyncContext, mgrSyncContextCancel := context.WithTimeout(context.Background(), 30*time.Second) + defer mgrSyncContextCancel() + + log.Info("Waiting for cache sync") + if synced := mgr.GetCache().WaitForCacheSync(mgrSyncContext); !synced { + log.Error(nil, "Timed out waiting for cache sync") + os.Exit(1) + } + + log.Info("Cache sync done") + + // Migrate first + m := migrator.New(mgr, operatorCfg, operatorNamespace) + err = m.Migrate() + if err != nil { + log.Error(err, "Unable to complete migration") + os.Exit(1) + } + + // Setup all Controllers + if err := controller.AddToManager(mgr, operatorCfg); err != nil { + log.Error(err, "") + os.Exit(1) + } + }() + + if err := InitConfig(operatorNamespace, operatorCfg, mgr.GetAPIReader()); err != nil { + log.Error(err, "Invalid configuration") + time.Sleep(15 * time.Second) + return err + } + + log.Info("Starting the Cmd.") + + // Start the Cmd + if blocking { + if err := mgr.Start(signalHandler); err != nil { + log.Error(err, "Manager exited non-zero") + return err + } + } else { + go mgr.Start(signalHandler) + 
} + + return nil +} + +//go:generate counterfeiter -o mocks/reader.go -fake-name Reader . Reader + +type Reader interface { + client.Reader +} + +// InitConfig initializes the passed-in config by overriding values from environment variables +// or the config map, if set +func InitConfig(namespace string, cfg *oconfig.Config, client client.Reader) error { + // Read from the config map if it exists; otherwise the default values are kept + err := oconfig.LoadFromConfigMap( + types.NamespacedName{Name: "operator-config", Namespace: namespace}, + "config.yaml", + client, + &cfg.Operator, + ) + if err != nil { + return errors.Wrap(err, "failed to get 'config.yaml' from 'operator-config' config map") + } + + clusterType := os.Getenv("CLUSTERTYPE") + offeringType, err := offering.GetType(clusterType) + if err != nil { + return err + } + cfg.Offering = offeringType + + log.Info(fmt.Sprintf("Operator configured for cluster type '%s'", cfg.Offering)) + + if cfg.Operator.Versions == nil { + return errors.New("no default images defined") + } + + if cfg.Operator.Versions.CA == nil { + return errors.New("no default CA images defined") + } + + if cfg.Operator.Versions.Peer == nil { + return errors.New("no default Peer images defined") + } + + if cfg.Operator.Versions.Orderer == nil { + return errors.New("no default Orderer images defined") + } + + return nil +} + +func GetOperatorNamespace() (string, error) { + operatorNamespace := os.Getenv("OPERATOR_NAMESPACE") + if operatorNamespace == "" { + return "", fmt.Errorf("OPERATOR_NAMESPACE not found") + } + + return operatorNamespace, nil +} diff --git a/pkg/command/operator_test.go b/pkg/command/operator_test.go new file mode 100644 index 00000000..51291ec6 --- /dev/null +++ b/pkg/command/operator_test.go @@ -0,0 +1,83 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package command_test + +import ( + "os" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + oconfig "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/deployer" + "github.com/IBM-Blockchain/fabric-operator/pkg/command" + "github.com/IBM-Blockchain/fabric-operator/pkg/command/mocks" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering" +) + +var _ = Describe("Operator command", func() { + Context("config initialization", func() { + var config *oconfig.Config + + BeforeEach(func() { + os.Setenv("CLUSTERTYPE", "K8S") + + config = &oconfig.Config{ + Operator: oconfig.Operator{ + Versions: &deployer.Versions{ + CA: map[string]deployer.VersionCA{}, + Peer: map[string]deployer.VersionPeer{}, + Orderer: map[string]deployer.VersionOrderer{}, + }, + }, + } + }) + + Context("cluster type", func() { + It("returns error for invalid cluster type value", func() { + os.Setenv("CLUSTERTYPE", "") + err := command.InitConfig("", config, &mocks.Reader{}) + Expect(err).To(HaveOccurred()) + }) + + It("sets value", func() { + os.Setenv("CLUSTERTYPE", "K8S") + err := command.InitConfig("", config, &mocks.Reader{}) + Expect(err).NotTo(HaveOccurred()) + Expect(config.Offering).To(Equal(offering.K8S)) + }) + }) + + Context("secret poll timeout", func() { + It("returns default value inf invalid timeout value set", func() { + os.Setenv("IBPOPERATOR_ORDERER_TIMEOUTS_SECRETPOLL", "45") + err := command.InitConfig("", config, &mocks.Reader{}) + Expect(err).To(HaveOccurred()) + }) + + It("sets value", func() { + os.Setenv("IBPOPERATOR_ORDERER_TIMEOUTS_SECRETPOLL", "45s") + err := command.InitConfig("", config, &mocks.Reader{}) + Expect(err).NotTo(HaveOccurred()) + Expect(config.Operator.Orderer.Timeouts.SecretPoll).To(Equal(common.MustParseDuration("45s"))) + }) + }) + }) +}) diff --git a/pkg/controller/mocks/client.go b/pkg/controller/mocks/client.go new file mode 100644 index 00000000..ee14505d --- /dev/null +++ b/pkg/controller/mocks/client.go @@ -0,0 +1,746 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package mocks + +import ( + "context" + "sync" + + "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type Client struct { + CreateStub func(context.Context, client.Object, ...controllerclient.CreateOption) error + createMutex sync.RWMutex + createArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 []controllerclient.CreateOption + } + createReturns struct { + result1 error + } + createReturnsOnCall map[int]struct { + result1 error + } + CreateOrUpdateStub func(context.Context, client.Object, ...controllerclient.CreateOrUpdateOption) error + createOrUpdateMutex sync.RWMutex + createOrUpdateArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 []controllerclient.CreateOrUpdateOption + } + createOrUpdateReturns struct { + result1 error + } + createOrUpdateReturnsOnCall map[int]struct { + result1 error + } + DeleteStub func(context.Context, client.Object, ...client.DeleteOption) error + deleteMutex sync.RWMutex + deleteArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 []client.DeleteOption + } + deleteReturns struct { + result1 error + } + deleteReturnsOnCall map[int]struct { + result1 error + } + GetStub func(context.Context, types.NamespacedName, client.Object) error + getMutex sync.RWMutex + getArgsForCall []struct { + arg1 context.Context + arg2 types.NamespacedName + arg3 client.Object + } + getReturns struct { + result1 error + } + getReturnsOnCall map[int]struct { + result1 error + } + ListStub func(context.Context, client.ObjectList, ...client.ListOption) error + listMutex sync.RWMutex + listArgsForCall []struct { + arg1 context.Context + arg2 client.ObjectList + arg3 []client.ListOption + } + listReturns struct { + result1 error + } + listReturnsOnCall map[int]struct { + result1 error + } + PatchStub func(context.Context, client.Object, client.Patch, ...controllerclient.PatchOption) error + patchMutex sync.RWMutex + patchArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 client.Patch + arg4 []controllerclient.PatchOption + } + patchReturns struct { + result1 error + } + patchReturnsOnCall map[int]struct { + result1 error + } + PatchStatusStub func(context.Context, client.Object, client.Patch, ...controllerclient.PatchOption) error + patchStatusMutex sync.RWMutex + patchStatusArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 client.Patch + arg4 []controllerclient.PatchOption + } + patchStatusReturns struct { + result1 error + } + patchStatusReturnsOnCall map[int]struct { + result1 error + } + UpdateStub func(context.Context, client.Object, ...controllerclient.UpdateOption) error + updateMutex sync.RWMutex + updateArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 []controllerclient.UpdateOption + } + updateReturns struct { + result1 error + } + updateReturnsOnCall map[int]struct { + result1 error + } + UpdateStatusStub func(context.Context, client.Object, ...client.UpdateOption) error + updateStatusMutex sync.RWMutex + updateStatusArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 []client.UpdateOption + } + updateStatusReturns struct { + result1 error + } + updateStatusReturnsOnCall map[int]struct { + result1 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *Client) Create(arg1 context.Context, arg2 client.Object, arg3 ...controllerclient.CreateOption) error { + 
fake.createMutex.Lock() + ret, specificReturn := fake.createReturnsOnCall[len(fake.createArgsForCall)] + fake.createArgsForCall = append(fake.createArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 []controllerclient.CreateOption + }{arg1, arg2, arg3}) + stub := fake.CreateStub + fakeReturns := fake.createReturns + fake.recordInvocation("Create", []interface{}{arg1, arg2, arg3}) + fake.createMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) CreateCallCount() int { + fake.createMutex.RLock() + defer fake.createMutex.RUnlock() + return len(fake.createArgsForCall) +} + +func (fake *Client) CreateCalls(stub func(context.Context, client.Object, ...controllerclient.CreateOption) error) { + fake.createMutex.Lock() + defer fake.createMutex.Unlock() + fake.CreateStub = stub +} + +func (fake *Client) CreateArgsForCall(i int) (context.Context, client.Object, []controllerclient.CreateOption) { + fake.createMutex.RLock() + defer fake.createMutex.RUnlock() + argsForCall := fake.createArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) CreateReturns(result1 error) { + fake.createMutex.Lock() + defer fake.createMutex.Unlock() + fake.CreateStub = nil + fake.createReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) CreateReturnsOnCall(i int, result1 error) { + fake.createMutex.Lock() + defer fake.createMutex.Unlock() + fake.CreateStub = nil + if fake.createReturnsOnCall == nil { + fake.createReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.createReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) CreateOrUpdate(arg1 context.Context, arg2 client.Object, arg3 ...controllerclient.CreateOrUpdateOption) error { + fake.createOrUpdateMutex.Lock() + ret, specificReturn := fake.createOrUpdateReturnsOnCall[len(fake.createOrUpdateArgsForCall)] + fake.createOrUpdateArgsForCall = append(fake.createOrUpdateArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 []controllerclient.CreateOrUpdateOption + }{arg1, arg2, arg3}) + stub := fake.CreateOrUpdateStub + fakeReturns := fake.createOrUpdateReturns + fake.recordInvocation("CreateOrUpdate", []interface{}{arg1, arg2, arg3}) + fake.createOrUpdateMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) CreateOrUpdateCallCount() int { + fake.createOrUpdateMutex.RLock() + defer fake.createOrUpdateMutex.RUnlock() + return len(fake.createOrUpdateArgsForCall) +} + +func (fake *Client) CreateOrUpdateCalls(stub func(context.Context, client.Object, ...controllerclient.CreateOrUpdateOption) error) { + fake.createOrUpdateMutex.Lock() + defer fake.createOrUpdateMutex.Unlock() + fake.CreateOrUpdateStub = stub +} + +func (fake *Client) CreateOrUpdateArgsForCall(i int) (context.Context, client.Object, []controllerclient.CreateOrUpdateOption) { + fake.createOrUpdateMutex.RLock() + defer fake.createOrUpdateMutex.RUnlock() + argsForCall := fake.createOrUpdateArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) CreateOrUpdateReturns(result1 error) { + fake.createOrUpdateMutex.Lock() + defer fake.createOrUpdateMutex.Unlock() + fake.CreateOrUpdateStub = nil + fake.createOrUpdateReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) CreateOrUpdateReturnsOnCall(i int, result1 error) { + fake.createOrUpdateMutex.Lock() + defer fake.createOrUpdateMutex.Unlock() + fake.CreateOrUpdateStub = nil + if fake.createOrUpdateReturnsOnCall == nil { + fake.createOrUpdateReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.createOrUpdateReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) Delete(arg1 context.Context, arg2 client.Object, arg3 ...client.DeleteOption) error { + fake.deleteMutex.Lock() + ret, specificReturn := fake.deleteReturnsOnCall[len(fake.deleteArgsForCall)] + fake.deleteArgsForCall = append(fake.deleteArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 []client.DeleteOption + }{arg1, arg2, arg3}) + stub := fake.DeleteStub + fakeReturns := fake.deleteReturns + fake.recordInvocation("Delete", []interface{}{arg1, arg2, arg3}) + fake.deleteMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) DeleteCallCount() int { + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + return len(fake.deleteArgsForCall) +} + +func (fake *Client) DeleteCalls(stub func(context.Context, client.Object, ...client.DeleteOption) error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = stub +} + +func (fake *Client) DeleteArgsForCall(i int) (context.Context, client.Object, []client.DeleteOption) { + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + argsForCall := fake.deleteArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) DeleteReturns(result1 error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = nil + fake.deleteReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) DeleteReturnsOnCall(i int, result1 error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = nil + if fake.deleteReturnsOnCall == nil { + fake.deleteReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.deleteReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) Get(arg1 context.Context, arg2 types.NamespacedName, arg3 client.Object) error { + fake.getMutex.Lock() + ret, specificReturn := fake.getReturnsOnCall[len(fake.getArgsForCall)] + fake.getArgsForCall = append(fake.getArgsForCall, struct { + arg1 context.Context + arg2 types.NamespacedName + arg3 client.Object + }{arg1, arg2, arg3}) + stub := fake.GetStub + fakeReturns := fake.getReturns + fake.recordInvocation("Get", []interface{}{arg1, arg2, arg3}) + fake.getMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) GetCallCount() int { + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + return len(fake.getArgsForCall) +} + +func (fake *Client) GetCalls(stub func(context.Context, types.NamespacedName, client.Object) error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = stub +} + +func (fake *Client) GetArgsForCall(i int) (context.Context, types.NamespacedName, client.Object) { + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + argsForCall := fake.getArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) GetReturns(result1 error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = nil + fake.getReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) GetReturnsOnCall(i int, result1 error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = nil + if fake.getReturnsOnCall == nil { + fake.getReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.getReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) List(arg1 context.Context, arg2 client.ObjectList, arg3 ...client.ListOption) error { + fake.listMutex.Lock() + ret, specificReturn := fake.listReturnsOnCall[len(fake.listArgsForCall)] + fake.listArgsForCall = append(fake.listArgsForCall, struct { + arg1 context.Context + arg2 client.ObjectList + arg3 []client.ListOption + }{arg1, arg2, arg3}) + stub := fake.ListStub + fakeReturns := fake.listReturns + fake.recordInvocation("List", []interface{}{arg1, arg2, arg3}) + fake.listMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) ListCallCount() int { + fake.listMutex.RLock() + defer fake.listMutex.RUnlock() + return len(fake.listArgsForCall) +} + +func (fake *Client) ListCalls(stub func(context.Context, client.ObjectList, ...client.ListOption) error) { + fake.listMutex.Lock() + defer fake.listMutex.Unlock() + fake.ListStub = stub +} + +func (fake *Client) ListArgsForCall(i int) (context.Context, client.ObjectList, []client.ListOption) { + fake.listMutex.RLock() + defer fake.listMutex.RUnlock() + argsForCall := fake.listArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) ListReturns(result1 error) { + fake.listMutex.Lock() + defer fake.listMutex.Unlock() + fake.ListStub = nil + fake.listReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) ListReturnsOnCall(i int, result1 error) { + fake.listMutex.Lock() + defer fake.listMutex.Unlock() + fake.ListStub = nil + if fake.listReturnsOnCall == nil { + fake.listReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.listReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) Patch(arg1 context.Context, arg2 client.Object, arg3 client.Patch, arg4 ...controllerclient.PatchOption) error { + fake.patchMutex.Lock() + ret, specificReturn := fake.patchReturnsOnCall[len(fake.patchArgsForCall)] + fake.patchArgsForCall = append(fake.patchArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 client.Patch + arg4 []controllerclient.PatchOption + }{arg1, arg2, arg3, arg4}) + stub := fake.PatchStub + fakeReturns := fake.patchReturns + fake.recordInvocation("Patch", []interface{}{arg1, arg2, arg3, arg4}) + fake.patchMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3, arg4...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) PatchCallCount() int { + fake.patchMutex.RLock() + defer fake.patchMutex.RUnlock() + return len(fake.patchArgsForCall) +} + +func (fake *Client) PatchCalls(stub func(context.Context, client.Object, client.Patch, ...controllerclient.PatchOption) error) { + fake.patchMutex.Lock() + defer fake.patchMutex.Unlock() + fake.PatchStub = stub +} + +func (fake *Client) PatchArgsForCall(i int) (context.Context, client.Object, client.Patch, []controllerclient.PatchOption) { + fake.patchMutex.RLock() + defer fake.patchMutex.RUnlock() + argsForCall := fake.patchArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4 +} + +func (fake *Client) PatchReturns(result1 error) { + fake.patchMutex.Lock() + defer fake.patchMutex.Unlock() + fake.PatchStub = nil + fake.patchReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) PatchReturnsOnCall(i int, result1 error) { + fake.patchMutex.Lock() + defer fake.patchMutex.Unlock() + fake.PatchStub = nil + if fake.patchReturnsOnCall == nil { + fake.patchReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.patchReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) PatchStatus(arg1 context.Context, arg2 client.Object, arg3 client.Patch, arg4 ...controllerclient.PatchOption) error { + fake.patchStatusMutex.Lock() + ret, specificReturn := fake.patchStatusReturnsOnCall[len(fake.patchStatusArgsForCall)] + fake.patchStatusArgsForCall = append(fake.patchStatusArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 client.Patch + arg4 []controllerclient.PatchOption + }{arg1, arg2, arg3, arg4}) + stub := fake.PatchStatusStub + fakeReturns := fake.patchStatusReturns + fake.recordInvocation("PatchStatus", []interface{}{arg1, arg2, arg3, arg4}) + fake.patchStatusMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3, arg4...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) PatchStatusCallCount() int { + fake.patchStatusMutex.RLock() + defer fake.patchStatusMutex.RUnlock() + return len(fake.patchStatusArgsForCall) +} + +func (fake *Client) PatchStatusCalls(stub func(context.Context, client.Object, client.Patch, ...controllerclient.PatchOption) error) { + fake.patchStatusMutex.Lock() + defer fake.patchStatusMutex.Unlock() + fake.PatchStatusStub = stub +} + +func (fake *Client) PatchStatusArgsForCall(i int) (context.Context, client.Object, client.Patch, []controllerclient.PatchOption) { + fake.patchStatusMutex.RLock() + defer fake.patchStatusMutex.RUnlock() + argsForCall := fake.patchStatusArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4 +} + +func (fake *Client) PatchStatusReturns(result1 error) { + fake.patchStatusMutex.Lock() + defer fake.patchStatusMutex.Unlock() + fake.PatchStatusStub = nil + fake.patchStatusReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) PatchStatusReturnsOnCall(i int, result1 error) { + fake.patchStatusMutex.Lock() + defer fake.patchStatusMutex.Unlock() + fake.PatchStatusStub = nil + if fake.patchStatusReturnsOnCall == nil { + fake.patchStatusReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.patchStatusReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) Update(arg1 context.Context, arg2 client.Object, arg3 ...controllerclient.UpdateOption) error { + fake.updateMutex.Lock() + ret, specificReturn := fake.updateReturnsOnCall[len(fake.updateArgsForCall)] + fake.updateArgsForCall = append(fake.updateArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 []controllerclient.UpdateOption + }{arg1, arg2, arg3}) + stub := fake.UpdateStub + fakeReturns := fake.updateReturns + fake.recordInvocation("Update", []interface{}{arg1, arg2, arg3}) + fake.updateMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) UpdateCallCount() int { + fake.updateMutex.RLock() + defer fake.updateMutex.RUnlock() + return len(fake.updateArgsForCall) +} + +func (fake *Client) UpdateCalls(stub func(context.Context, client.Object, ...controllerclient.UpdateOption) error) { + fake.updateMutex.Lock() + defer fake.updateMutex.Unlock() + fake.UpdateStub = stub +} + +func (fake *Client) UpdateArgsForCall(i int) (context.Context, client.Object, []controllerclient.UpdateOption) { + fake.updateMutex.RLock() + defer fake.updateMutex.RUnlock() + argsForCall := fake.updateArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) UpdateReturns(result1 error) { + fake.updateMutex.Lock() + defer fake.updateMutex.Unlock() + fake.UpdateStub = nil + fake.updateReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) UpdateReturnsOnCall(i int, result1 error) { + fake.updateMutex.Lock() + defer fake.updateMutex.Unlock() + fake.UpdateStub = nil + if fake.updateReturnsOnCall == nil { + fake.updateReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.updateReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) UpdateStatus(arg1 context.Context, arg2 client.Object, arg3 ...client.UpdateOption) error { + fake.updateStatusMutex.Lock() + ret, specificReturn := fake.updateStatusReturnsOnCall[len(fake.updateStatusArgsForCall)] + fake.updateStatusArgsForCall = append(fake.updateStatusArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 []client.UpdateOption + }{arg1, arg2, arg3}) + stub := fake.UpdateStatusStub + fakeReturns := fake.updateStatusReturns + fake.recordInvocation("UpdateStatus", []interface{}{arg1, arg2, arg3}) + fake.updateStatusMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) UpdateStatusCallCount() int { + fake.updateStatusMutex.RLock() + defer fake.updateStatusMutex.RUnlock() + return len(fake.updateStatusArgsForCall) +} + +func (fake *Client) UpdateStatusCalls(stub func(context.Context, client.Object, ...client.UpdateOption) error) { + fake.updateStatusMutex.Lock() + defer fake.updateStatusMutex.Unlock() + fake.UpdateStatusStub = stub +} + +func (fake *Client) UpdateStatusArgsForCall(i int) (context.Context, client.Object, []client.UpdateOption) { + fake.updateStatusMutex.RLock() + defer fake.updateStatusMutex.RUnlock() + argsForCall := fake.updateStatusArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) UpdateStatusReturns(result1 error) { + fake.updateStatusMutex.Lock() + defer fake.updateStatusMutex.Unlock() + fake.UpdateStatusStub = nil + fake.updateStatusReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) UpdateStatusReturnsOnCall(i int, result1 error) { + fake.updateStatusMutex.Lock() + defer fake.updateStatusMutex.Unlock() + fake.UpdateStatusStub = nil + if fake.updateStatusReturnsOnCall == nil { + fake.updateStatusReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.updateStatusReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.createMutex.RLock() + defer fake.createMutex.RUnlock() + fake.createOrUpdateMutex.RLock() + defer fake.createOrUpdateMutex.RUnlock() + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + fake.listMutex.RLock() + defer fake.listMutex.RUnlock() + fake.patchMutex.RLock() + defer fake.patchMutex.RUnlock() + fake.patchStatusMutex.RLock() + defer fake.patchStatusMutex.RUnlock() + fake.updateMutex.RLock() + defer fake.updateMutex.RUnlock() + fake.updateStatusMutex.RLock() + defer fake.updateStatusMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *Client) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ controllerclient.Client = new(Client) diff --git a/pkg/crd/crd_suite_test.go b/pkg/crd/crd_suite_test.go new file mode 100644 index 00000000..8f0fc7ba --- /dev/null +++ b/pkg/crd/crd_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package crd_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestCrd(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Crd Suite") +} diff --git a/pkg/crd/manager.go b/pkg/crd/manager.go new file mode 100644 index 00000000..06f0de60 --- /dev/null +++ b/pkg/crd/manager.go @@ -0,0 +1,94 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package crd + +import ( + "path/filepath" + + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +const ( + CACRD = "./config/crd/bases/ibp_v1alpha1_ibpca.yaml" + PeerCRD = "./config/crd/bases/ibp_v1alpha1_ibppeer.yaml" + OrdererCRD = "./config/crd/bases/ibp_v1alpha1_ibporderer.yaml" + ConsoleCRD = "./config/crd/bases/ibp_v1alpha1_ibpconsole.yaml" + + CACRDFile = "ibp_v1alpha1_ibpca.yaml" + PEERCRDFIle = "ibp_v1alpha1_ibppeer.yaml" + ORDERERCRDFILE = "ibp_v1alpha1_ibporderer.yaml" + CONSOLECRDFILE = "ibp_v1alpha1_ibpconsole.yaml" +) + +var log = logf.Log.WithName("crd_manager") + +//go:generate counterfeiter -o mocks/client.go -fake-name Client . Client + +type Client interface { + CreateCRD(crd *extv1.CustomResourceDefinition) (*extv1.CustomResourceDefinition, error) +} + +type Manager struct { + Client Client + crds []*extv1.CustomResourceDefinition +} + +func GetCRDList() []string { + return []string{CACRD, PeerCRD, OrdererCRD, ConsoleCRD} +} + +func GetCRDListFromDir(dir string) []string { + + crds := []string{ + filepath.Join(dir, CACRDFile), + filepath.Join(dir, PEERCRDFIle), + filepath.Join(dir, ORDERERCRDFILE), + filepath.Join(dir, CONSOLECRDFILE), + } + + return crds +} + +func NewManager(c Client, files ...string) (*Manager, error) { + m := &Manager{ + Client: c, + } + for _, file := range files { + crd, err := util.GetCRDFromFile(file) + if err != nil { + return nil, err + } + m.crds = append(m.crds, crd) + } + return m, nil +} + +func (m *Manager) Create() error { + log.Info("Create CRDs") + for _, crd := range m.crds { + log.Info("Creating", "CRD", crd.Name) + _, err := m.Client.CreateCRD(crd) + if err != nil { + return err + } + } + return nil +} diff --git a/pkg/crd/manager_test.go b/pkg/crd/manager_test.go new file mode 100644 index 00000000..62085427 --- /dev/null +++ b/pkg/crd/manager_test.go @@ -0,0 +1,75 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package crd_test + +import ( + "errors" + + "github.com/IBM-Blockchain/fabric-operator/pkg/crd" + "github.com/IBM-Blockchain/fabric-operator/pkg/crd/mocks" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("manager", func() { + var mockClient *mocks.Client + + BeforeEach(func() { + mockClient = &mocks.Client{} + }) + + Context("NewManager", func() { + It("returns an error if it fails to load a file", func() { + m, err := crd.NewManager(mockClient, "bad.yaml") + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("no such file or directory")) + Expect(m).To(BeNil()) + }) + + It("returns a manager", func() { + m, err := crd.NewManager(mockClient, "../../config/crd/bases/ibp.com_ibpcas.yaml") + Expect(err).NotTo(HaveOccurred()) + Expect(m).NotTo(BeNil()) + }) + }) + + Context("Create", func() { + var ( + err error + manager *crd.Manager + ) + + BeforeEach(func() { + manager, err = crd.NewManager(mockClient, "../../config/crd/bases/ibp.com_ibpcas.yaml") + Expect(err).NotTo(HaveOccurred()) + }) + + It("returns an error if it fails to create CRD", func() { + mockClient.CreateCRDReturns(nil, errors.New("failed to create crd")) + err = manager.Create() + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to create crd")) + }) + + It("returns no error on successful creation", func() { + err = manager.Create() + Expect(err).NotTo(HaveOccurred()) + }) + }) +}) diff --git a/pkg/crd/mocks/client.go b/pkg/crd/mocks/client.go new file mode 100644 index 00000000..c540a370 --- /dev/null +++ b/pkg/crd/mocks/client.go @@ -0,0 +1,117 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
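+// The fake below implements the crd.Client interface declared in pkg/crd/manager.go
+// (enforced by the assertion at the end of this file); it is typically refreshed via
+// `go generate`, driven by the counterfeiter directive that sits above that interface.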
+package mocks + +import ( + "sync" + + "github.com/IBM-Blockchain/fabric-operator/pkg/crd" + v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" +) + +type Client struct { + CreateCRDStub func(*v1.CustomResourceDefinition) (*v1.CustomResourceDefinition, error) + createCRDMutex sync.RWMutex + createCRDArgsForCall []struct { + arg1 *v1.CustomResourceDefinition + } + createCRDReturns struct { + result1 *v1.CustomResourceDefinition + result2 error + } + createCRDReturnsOnCall map[int]struct { + result1 *v1.CustomResourceDefinition + result2 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *Client) CreateCRD(arg1 *v1.CustomResourceDefinition) (*v1.CustomResourceDefinition, error) { + fake.createCRDMutex.Lock() + ret, specificReturn := fake.createCRDReturnsOnCall[len(fake.createCRDArgsForCall)] + fake.createCRDArgsForCall = append(fake.createCRDArgsForCall, struct { + arg1 *v1.CustomResourceDefinition + }{arg1}) + stub := fake.CreateCRDStub + fakeReturns := fake.createCRDReturns + fake.recordInvocation("CreateCRD", []interface{}{arg1}) + fake.createCRDMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *Client) CreateCRDCallCount() int { + fake.createCRDMutex.RLock() + defer fake.createCRDMutex.RUnlock() + return len(fake.createCRDArgsForCall) +} + +func (fake *Client) CreateCRDCalls(stub func(*v1.CustomResourceDefinition) (*v1.CustomResourceDefinition, error)) { + fake.createCRDMutex.Lock() + defer fake.createCRDMutex.Unlock() + fake.CreateCRDStub = stub +} + +func (fake *Client) CreateCRDArgsForCall(i int) *v1.CustomResourceDefinition { + fake.createCRDMutex.RLock() + defer fake.createCRDMutex.RUnlock() + argsForCall := fake.createCRDArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Client) CreateCRDReturns(result1 *v1.CustomResourceDefinition, result2 error) { + fake.createCRDMutex.Lock() + defer fake.createCRDMutex.Unlock() + fake.CreateCRDStub = nil + fake.createCRDReturns = struct { + result1 *v1.CustomResourceDefinition + result2 error + }{result1, result2} +} + +func (fake *Client) CreateCRDReturnsOnCall(i int, result1 *v1.CustomResourceDefinition, result2 error) { + fake.createCRDMutex.Lock() + defer fake.createCRDMutex.Unlock() + fake.CreateCRDStub = nil + if fake.createCRDReturnsOnCall == nil { + fake.createCRDReturnsOnCall = make(map[int]struct { + result1 *v1.CustomResourceDefinition + result2 error + }) + } + fake.createCRDReturnsOnCall[i] = struct { + result1 *v1.CustomResourceDefinition + result2 error + }{result1, result2} +} + +func (fake *Client) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.createCRDMutex.RLock() + defer fake.createCRDMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *Client) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ crd.Client = new(Client) diff --git a/pkg/global/config.go b/pkg/global/config.go new file mode 100644 index 
00000000..b1f93e98 --- /dev/null +++ b/pkg/global/config.go @@ -0,0 +1,64 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package global + +import ( + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/container" + ibpdep "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/deployment" + ibpjob "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/job" + + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// ConfigSetter sets values on all resources created by operator +type ConfigSetter struct { + Config config.Globals +} + +type resource interface { + UpdateSecurityContextForAllContainers(sc container.SecurityContext) +} + +// Apply applies all global configurations +func (cs *ConfigSetter) Apply(obj runtime.Object) { + cs.UpdateSecurityContextForAllContainers(obj) +} + +// UpdateSecurityContextForAllContainers updates the security context for all containers defined on +// resource object +func (cs *ConfigSetter) UpdateSecurityContextForAllContainers(obj runtime.Object) { + if cs.Config.SecurityContext == nil { + return + } + + var resource resource + switch obj.(type) { + case *appsv1.Deployment: + resource = ibpdep.New(obj.(*appsv1.Deployment)) + case *batchv1.Job: + resource = ibpjob.NewWithDefaults(obj.(*batchv1.Job)) + default: + return + } + + resource.UpdateSecurityContextForAllContainers(*cs.Config.SecurityContext) +} diff --git a/pkg/global/config_test.go b/pkg/global/config_test.go new file mode 100644 index 00000000..de6ab852 --- /dev/null +++ b/pkg/global/config_test.go @@ -0,0 +1,163 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package global_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/gstruct" + + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + "github.com/IBM-Blockchain/fabric-operator/pkg/global" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/container" + + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" +) + +var _ = Describe("Global config", func() { + var ( + f = false + root = int64(0) + + configSetter *global.ConfigSetter + ) + + BeforeEach(func() { + configSetter = &global.ConfigSetter{ + Config: config.Globals{ + SecurityContext: &container.SecurityContext{ + RunAsNonRoot: &f, + Privileged: &f, + RunAsUser: &root, + AllowPrivilegeEscalation: &f, + }, + }, + } + }) + + Context("security context on containers", func() { + Context("job", func() { + var job *batchv1.Job + + BeforeEach(func() { + job = &batchv1.Job{ + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{ + { + Name: "initcontainer1", + }, + { + Name: "initcontainer2", + }, + }, + Containers: []corev1.Container{ + { + Name: "container1", + }, + { + Name: "container2", + }, + }, + }, + }, + }, + } + }) + + It("updates security context", func() { + configSetter.UpdateSecurityContextForAllContainers(job) + + for _, cont := range job.Spec.Template.Spec.InitContainers { + Expect(*cont.SecurityContext).To(MatchFields(IgnoreExtras, Fields{ + "RunAsNonRoot": Equal(&f), + "Privileged": Equal(&f), + "RunAsUser": Equal(&root), + "AllowPrivilegeEscalation": Equal(&f), + })) + } + + for _, cont := range job.Spec.Template.Spec.Containers { + Expect(*cont.SecurityContext).To(MatchFields(IgnoreExtras, Fields{ + "RunAsNonRoot": Equal(&f), + "Privileged": Equal(&f), + "RunAsUser": Equal(&root), + "AllowPrivilegeEscalation": Equal(&f), + })) + } + }) + }) + + Context("deployment", func() { + var dep *appsv1.Deployment + + BeforeEach(func() { + dep = &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{ + { + Name: "initcontainer1", + }, + { + Name: "initcontainer2", + }, + }, + Containers: []corev1.Container{ + { + Name: "container1", + }, + { + Name: "container2", + }, + }, + }, + }, + }, + } + }) + + It("updates security context", func() { + configSetter.UpdateSecurityContextForAllContainers(dep) + + for _, cont := range dep.Spec.Template.Spec.InitContainers { + Expect(*cont.SecurityContext).To(MatchFields(IgnoreExtras, Fields{ + "RunAsNonRoot": Equal(&f), + "Privileged": Equal(&f), + "RunAsUser": Equal(&root), + "AllowPrivilegeEscalation": Equal(&f), + })) + } + + for _, cont := range dep.Spec.Template.Spec.Containers { + Expect(*cont.SecurityContext).To(MatchFields(IgnoreExtras, Fields{ + "RunAsNonRoot": Equal(&f), + "Privileged": Equal(&f), + "RunAsUser": Equal(&root), + "AllowPrivilegeEscalation": Equal(&f), + })) + } + }) + }) + }) +}) diff --git a/pkg/global/global_suite_test.go b/pkg/global/global_suite_test.go new file mode 100644 index 00000000..9cb833d2 --- /dev/null +++ b/pkg/global/global_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package global_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestGlobal(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Global Suite") +} diff --git a/pkg/initializer/ca/bccsp/config.go b/pkg/initializer/ca/bccsp/config.go new file mode 100644 index 00000000..58b823d9 --- /dev/null +++ b/pkg/initializer/ca/bccsp/config.go @@ -0,0 +1,45 @@ +//go:build !pkcs11 +// +build !pkcs11 + +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package bccsp + +import ( + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/ca/v1" + "github.com/hyperledger/fabric/bccsp/factory" +) + +func GetBCCSPOpts(from v1.BCCSP) *factory.FactoryOpts { + factoryOpts := &factory.FactoryOpts{ + ProviderName: from.ProviderName, + } + + if from.SW != nil { + factoryOpts.SwOpts = &factory.SwOpts{ + SecLevel: from.SW.SecLevel, + HashFamily: from.SW.HashFamily, + FileKeystore: &factory.FileKeystoreOpts{ + KeyStorePath: from.SW.FileKeyStore.KeyStorePath, + }, + } + } + + return factoryOpts +} diff --git a/pkg/initializer/ca/bccsp/configpkcs11.go b/pkg/initializer/ca/bccsp/configpkcs11.go new file mode 100644 index 00000000..3760a98a --- /dev/null +++ b/pkg/initializer/ca/bccsp/configpkcs11.go @@ -0,0 +1,63 @@ +//go:build pkcs11 +// +build pkcs11 + +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package bccsp + +import ( + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca/config" + "github.com/hyperledger/fabric/bccsp/factory" + "github.com/hyperledger/fabric/bccsp/pkcs11" +) + +func GetBCCSPOpts(from config.BCCSP) *factory.FactoryOpts { + factoryOpts := &factory.FactoryOpts{ + ProviderName: from.ProviderName, + } + + if from.SW != nil { + factoryOpts.SwOpts = &factory.SwOpts{ + SecLevel: from.SW.SecLevel, + HashFamily: from.SW.HashFamily, + FileKeystore: &factory.FileKeystoreOpts{ + KeyStorePath: from.SW.FileKeyStore.KeyStorePath, + }, + } + } + + if from.PKCS11 != nil { + factoryOpts.Pkcs11Opts = &pkcs11.PKCS11Opts{ + SecLevel: from.PKCS11.SecLevel, + HashFamily: from.PKCS11.HashFamily, + Library: from.PKCS11.Library, + Label: from.PKCS11.Label, + Pin: from.PKCS11.Pin, + SoftVerify: from.PKCS11.SoftVerify, + } + + if from.PKCS11.FileKeystore != nil { + factoryOpts.Pkcs11Opts.FileKeystore = &pkcs11.FileKeystoreOpts{ + KeyStorePath: from.PKCS11.FileKeyStore.KeyStorePath, + } + } + } + + return factoryOpts +} diff --git a/pkg/initializer/ca/ca.go b/pkg/initializer/ca/ca.go new file mode 100644 index 00000000..c57d552a --- /dev/null +++ b/pkg/initializer/ca/ca.go @@ -0,0 +1,476 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package initializer + +import ( + "context" + "database/sql" + "fmt" + "io/ioutil" + "net/url" + "os" + "path/filepath" + "time" + + _ "github.com/lib/pq" + + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/ca/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca/config" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + "github.com/IBM-Blockchain/fabric-operator/pkg/util/merge" + "github.com/IBM-Blockchain/fabric-operator/pkg/util/pointer" + "github.com/hyperledger/fabric-ca/lib" + "github.com/pkg/errors" + "github.com/spf13/viper" + "sigs.k8s.io/yaml" +) + +//go:generate counterfeiter -o mocks/config.go -fake-name CAConfig . 
CAConfig + +type CAConfig interface { + GetServerConfig() *v1.ServerConfig + ParseCABlock() (map[string][]byte, error) + ParseDBBlock() (map[string][]byte, error) + ParseTLSBlock() (map[string][]byte, error) + ParseOperationsBlock() (map[string][]byte, error) + ParseIntermediateBlock() (map[string][]byte, error) + SetServerConfig(*v1.ServerConfig) + SetMountPaths(config.Type) + GetHomeDir() string + SetUpdate(bool) + UsingPKCS11() bool +} + +type CA struct { + CN string + Config CAConfig + Viper *viper.Viper + Type config.Type + SqliteDir string + UsingHSMProxy bool + + configFile string +} + +func LoadConfigFromFile(file string) (*v1.ServerConfig, error) { + serverConfig := &v1.ServerConfig{} + bytes, err := ioutil.ReadFile(filepath.Clean(file)) + if err != nil { + return nil, err + } + + err = yaml.Unmarshal(bytes, serverConfig) + if err != nil { + return nil, err + } + + err = yaml.Unmarshal(bytes, &serverConfig.CAConfig) + if err != nil { + return nil, err + } + + return serverConfig, nil +} + +func NewCA(config CAConfig, caType config.Type, sqliteDir string, hsmProxy bool, cn string) *CA { + return &CA{ + CN: cn, + Config: config, + Viper: viper.New(), + Type: caType, + configFile: fmt.Sprintf("%s/fabric-ca-server-config.yaml", config.GetHomeDir()), + SqliteDir: sqliteDir, + UsingHSMProxy: hsmProxy, + } +} + +func (ca *CA) OverrideServerConfig(newConfig *v1.ServerConfig) (err error) { + serverConfig := ca.Config.GetServerConfig() + + log.Info("Overriding config values from ca initializer") + // If newConfig isn't passed, we want to make sure serverConfig.CAConfig.CSR.Cn is set + // to ca.CN by default; if newConfig is passed for an intermediate CA, the logic below + // will handle setting CN to blank if ParentServer.URL is set + serverConfig.CAConfig.CSR.CN = ca.CN + + if newConfig != nil { + log.Info("Overriding config values from spec") + err = merge.WithOverwrite(ca.Config.GetServerConfig(), newConfig) + if err != nil { + return errors.Wrapf(err, "failed to merge override configuration") + } + + if ca.Config.UsingPKCS11() { + ca.SetPKCS11Defaults(serverConfig) + } + + // Passing in CN when enrolling an intermediate CA will cause the fabric-ca + // server to error out, a CN cannot be passed for intermediate CA. Setting + // CN to blank if ParentServer.URL is set + if serverConfig.CAConfig.Intermediate.ParentServer.URL != "" { + serverConfig.CAConfig.CSR.CN = "" + } + } + + ca.setDefaults(serverConfig) + + return nil +} + +func (ca *CA) WriteConfig() (err error) { + dir := ca.Config.GetHomeDir() + log.Info(fmt.Sprintf("Writing config to file: '%s'", dir)) + + bytes, err := ca.ConfigToBytes() + if err != nil { + return err + } + + err = util.EnsureDir(dir) + if err != nil { + return err + } + + err = ioutil.WriteFile(filepath.Clean(ca.configFile), bytes, 0600) + if err != nil { + return err + } + + return nil +} + +func (ca *CA) Init() (err error) { + if ca.Config.UsingPKCS11() && ca.UsingHSMProxy { + env := os.Getenv("PKCS11_PROXY_SOCKET") + if env == "" { + return errors.New("ca configured to use PKCS11, but no PKCS11 proxy endpoint set") + } + if !util.IsTCPReachable(env) { + return errors.New(fmt.Sprintf("Unable to reach PKCS11 proxy: %s", env)) + } + } + + cfg, err := ca.ViperUnmarshal(ca.configFile) + if err != nil { + return errors.Wrap(err, "viper unmarshal failed") + } + + dir := filepath.Dir(ca.configFile) + // TODO check if this is required!! 
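+ // The provider is forced off for this init-only invocation; the served CA's
+ // operations/metrics endpoint is configured separately through the operations
+ // block (hence the TODO above questioning whether this override is still needed).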
+ cfg.Metrics.Provider = "disabled" + + if cfg.CAcfg.DB.Type == "postgres" { + if !ca.IsPostgresReachable(cfg.CAcfg.DB) { + return errors.New("Cannot initialize CA. Postgres is not reachable") + } + } + + parentURL := cfg.CAcfg.Intermediate.ParentServer.URL + if parentURL != "" { + log.Info(fmt.Sprintf("Request received to enroll with parent server: %s", parentURL)) + + err = ca.HealthCheck(parentURL, cfg.CAcfg.Intermediate.TLS.CertFiles[0]) + if err != nil { + return errors.Wrap(err, "could not connect to parent CA") + } + } + + caserver := &lib.Server{ + HomeDir: dir, + Config: cfg, + CA: lib.CA{ + Config: &cfg.CAcfg, + }, + } + + err = caserver.Init(false) + if err != nil { + return err + } + serverConfig := ca.Config.GetServerConfig() + serverConfig.CA.Certfile = caserver.CA.Config.CA.Certfile + serverConfig.CA.Keyfile = caserver.CA.Config.CA.Keyfile + serverConfig.CA.Chainfile = caserver.CA.Config.CA.Chainfile + + if ca.Type.Is(config.EnrollmentCA) { + serverConfig.CAfiles = []string{"/data/tlsca/fabric-ca-server-config.yaml"} + } + + return nil +} + +func (ca *CA) IsPostgresReachable(db lib.CAConfigDB) bool { + + datasource := db.Datasource + if db.TLS.CertFiles != nil && len(db.TLS.CertFiles) > 0 { + // The first cert because that is what hyperledger/fabric-ca uses + datasource = fmt.Sprintf("%s sslrootcert=%s", datasource, db.TLS.CertFiles[0]) + } + + if db.TLS.Client.CertFile != "" { + datasource = fmt.Sprintf("%s sslcert=%s", datasource, db.TLS.Client.CertFile) + } + + if db.TLS.Client.KeyFile != "" { + datasource = fmt.Sprintf("%s sslkey=%s", datasource, db.TLS.Client.KeyFile) + } + + sqldb, err := sql.Open(db.Type, datasource) + if err != nil { + return false + } + defer sqldb.Close() + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + err = sqldb.PingContext(ctx) + if err != nil { + return false + } + + return true +} + +// ViperUnmarshal as this is what fabric-ca uses when it reads it's configuration +// file +func (ca *CA) ViperUnmarshal(configFile string) (*lib.ServerConfig, error) { + ca.Viper.SetConfigFile(configFile) + err := ca.Viper.ReadInConfig() + if err != nil { + return nil, errors.Wrapf(err, "viper unable to read in config: %s", configFile) + } + + config := &lib.ServerConfig{} + err = ca.Viper.Unmarshal(config) + if err != nil { + return nil, errors.Wrap(err, "viper unable to unmarshal into server level config") + } + + err = ca.Viper.Unmarshal(&config.CAcfg) + if err != nil { + return nil, errors.Wrap(err, "viper unable to unmarshal into CA level config") + } + + return config, nil +} + +func (ca *CA) ParseCrypto() (map[string][]byte, error) { + switch ca.Type { + case config.EnrollmentCA: + return ca.ParseEnrollmentCACrypto() + case config.TLSCA: + return ca.ParseTLSCACrypto() + } + + return nil, fmt.Errorf("unsupported ca type '%s'", ca.Type) +} + +func (ca *CA) ParseEnrollmentCACrypto() (map[string][]byte, error) { + serverConfig := ca.Config.GetServerConfig() + if serverConfig.TLS.IsEnabled() { + // TLS cert and key file must always be set. Operator should auto generate + // TLS cert and key if none are provided. 
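+ // Note that only the case where *both* values are missing is rejected here.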
+ if serverConfig.TLS.CertFile == "" && serverConfig.TLS.KeyFile == "" { + return nil, errors.New("no TLS cert and key file provided") + } + } + + if serverConfig.Operations.TLS.IsEnabled() { + // Same set of TLS certificate that are used for CA endpoint is also used for operations endpoint + serverConfig.Operations.TLS.CertFile = serverConfig.TLS.CertFile + serverConfig.Operations.TLS.KeyFile = serverConfig.TLS.KeyFile + } + + crypto, err := ca.Config.ParseCABlock() + if err != nil { + return nil, errors.Wrap(err, "failed to parse ca block") + } + + tlsCrypto, err := ca.Config.ParseTLSBlock() + if err != nil { + return nil, errors.Wrap(err, "failed to parse tls block") + } + crypto = util.JoinMaps(crypto, tlsCrypto) + + dbCrypto, err := ca.Config.ParseDBBlock() + if err != nil { + return nil, errors.Wrap(err, "failed to parse db block") + } + crypto = util.JoinMaps(crypto, dbCrypto) + + opsCrypto, err := ca.Config.ParseOperationsBlock() + if err != nil { + return nil, errors.Wrap(err, "failed to parse operations block") + } + crypto = util.JoinMaps(crypto, opsCrypto) + + intCrypto, err := ca.Config.ParseIntermediateBlock() + if err != nil { + return nil, errors.Wrap(err, "failed to parse intermediate block") + } + crypto = util.JoinMaps(crypto, intCrypto) + + return crypto, nil +} + +func (ca *CA) ParseTLSCACrypto() (map[string][]byte, error) { + crypto, err := ca.ParseCABlock() + if err != nil { + return nil, errors.Wrap(err, "failed to parse ca block") + } + + tlsCrypto, err := ca.Config.ParseTLSBlock() + if err != nil { + return nil, errors.Wrap(err, "failed to parse tls block") + } + crypto = util.JoinMaps(crypto, tlsCrypto) + + dbCrypto, err := ca.Config.ParseDBBlock() + if err != nil { + return nil, errors.Wrap(err, "failed to parse db block") + } + crypto = util.JoinMaps(crypto, dbCrypto) + + intCrypto, err := ca.Config.ParseIntermediateBlock() + if err != nil { + return nil, errors.Wrap(err, "failed to parse intermediate block") + } + crypto = util.JoinMaps(crypto, intCrypto) + + return crypto, nil +} + +func (ca *CA) ParseCABlock() (map[string][]byte, error) { + crypto, err := ca.Config.ParseCABlock() + if err != nil { + return nil, err + } + + return crypto, nil +} + +func (ca *CA) ConfigToBytes() ([]byte, error) { + + bytes, err := yaml.Marshal(ca.Config.GetServerConfig()) + if err != nil { + return nil, err + } + + return bytes, nil +} + +func (ca *CA) SetMountPaths() { + ca.Config.SetMountPaths(ca.Type) +} + +func (ca *CA) SetPKCS11Defaults(serverConfig *v1.ServerConfig) { + if serverConfig.CAConfig.CSP.PKCS11 == nil { + serverConfig.CAConfig.CSP.PKCS11 = &v1.PKCS11Opts{} + } + + if ca.UsingHSMProxy { + serverConfig.CAConfig.CSP.PKCS11.Library = "/usr/local/lib/libpkcs11-proxy.so" + } + + serverConfig.CAConfig.CSP.PKCS11.FileKeyStore.KeyStorePath = "msp/keystore" + + if serverConfig.CAConfig.CSP.PKCS11.HashFamily == "" { + serverConfig.CAConfig.CSP.PKCS11.HashFamily = "SHA2" + } + + if serverConfig.CAConfig.CSP.PKCS11.SecLevel == 0 { + serverConfig.CAConfig.CSP.PKCS11.SecLevel = 256 + } +} + +func (ca *CA) GetHomeDir() string { + return ca.Config.GetHomeDir() +} + +func (ca *CA) GetServerConfig() *v1.ServerConfig { + return ca.Config.GetServerConfig() +} + +func (ca *CA) RemoveHomeDir() error { + err := os.RemoveAll(ca.GetHomeDir()) + if err != nil { + return err + } + return nil +} + +func (ca *CA) IsBeingUpdated() { + ca.Config.SetUpdate(true) +} + +func (ca *CA) IsHSMEnabled() bool { + if ca.Config.UsingPKCS11() { + return true + } + return false +} + +func (ca *CA) 
HealthCheck(parentURL, certPath string) error { + parsedURL, err := url.Parse(parentURL) + if err != nil { + return errors.Wrapf(err, "invalid CA url") + } + + healthURL := getHealthCheckEndpoint(parsedURL) + log.Info(fmt.Sprintf("Health checking parent server, pinging %s", healthURL)) + + // Make sure that parent server is running before trying to enroll + // intermediate CA. Retry 5 times for a total of 5 seconds to make + // sure parent server is up. If parent server is found, bail early + // and continue with enrollment + cert, err := ioutil.ReadFile(filepath.Clean(certPath)) + if err != nil { + return errors.Wrap(err, "failed to read TLS cert for intermediate enrollment") + } + + for i := 0; i < 5; i++ { + err = util.HealthCheck(healthURL, cert, 30*time.Second) + if err != nil { + log.Info(fmt.Sprintf("Health check error: %s", err)) + time.Sleep(1 * time.Second) + log.Info("Health check failed, retrying") + continue + } + log.Info("Health check successfull") + break + } + + return nil +} + +func (ca *CA) GetType() config.Type { + return ca.Type +} + +func getHealthCheckEndpoint(u *url.URL) string { + return fmt.Sprintf("%s://%s/cainfo", u.Scheme, u.Host) +} + +func (ca *CA) setDefaults(serverConfig *v1.ServerConfig) { + serverConfig.CAConfig.Cfg.Identities.AllowRemove = pointer.True() + serverConfig.CAConfig.Cfg.Affiliations.AllowRemove = pointer.True() + // Ignore Certificate Expiry for re-enroll + serverConfig.CA.ReenrollIgnoreCertExpiry = pointer.True() +} diff --git a/pkg/initializer/ca/ca_suite_test.go b/pkg/initializer/ca/ca_suite_test.go new file mode 100644 index 00000000..3fe60afd --- /dev/null +++ b/pkg/initializer/ca/ca_suite_test.go @@ -0,0 +1,33 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package initializer_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestCa(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Ca Suite") +} + +//go:generate counterfeiter -o mocks/client.go -fake-name Client ../../k8s/controllerclient Client diff --git a/pkg/initializer/ca/ca_test.go b/pkg/initializer/ca/ca_test.go new file mode 100644 index 00000000..4c93e56e --- /dev/null +++ b/pkg/initializer/ca/ca_test.go @@ -0,0 +1,298 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package initializer_test + +import ( + "path/filepath" + + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/ca/v1" + initializer "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca/config" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca/mocks" + "github.com/IBM-Blockchain/fabric-operator/pkg/util/pointer" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/pkg/errors" +) + +var _ = Describe("IBPCA", func() { + Context("reading file", func() { + It("fails load configuration file that doesn't exist", func() { + _, err := initializer.LoadConfigFromFile("notexist.yaml") + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("no such file or directory")) + }) + + It("loads from ca configuration file", func() { + cfg, err := initializer.LoadConfigFromFile("../../../defaultconfig/ca/ca.yaml") + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + }) + }) + + Context("ca initializion", func() { + var ( + ca *initializer.CA + defaultConfig *mocks.CAConfig + ) + + BeforeEach(func() { + cfg := &config.Config{ + HomeDir: "ca_test", + MountPath: "/mount", + ServerConfig: &v1.ServerConfig{ + CAConfig: v1.CAConfig{ + CSP: &v1.BCCSP{ + ProviderName: "PKCS11", + PKCS11: &v1.PKCS11Opts{ + Pin: "1234", + Label: "root", + }, + }, + CSR: v1.CSRInfo{ + CN: "ca", + }, + }, + }, + } + + defaultConfig = &mocks.CAConfig{} + defaultConfig.GetServerConfigReturns(cfg.ServerConfig) + defaultConfig.GetHomeDirReturns(cfg.HomeDir) + + ca = initializer.NewCA(defaultConfig, config.EnrollmentCA, "/tmp", true, "ca_test") + }) + + It("sets default values", func() { + err := ca.OverrideServerConfig(nil) + Expect(err).NotTo(HaveOccurred()) + + By("enabling removal of identities and affiliations", func() { + Expect(*defaultConfig.GetServerConfig().CAConfig.Cfg.Identities.AllowRemove).To(Equal(true)) + Expect(*defaultConfig.GetServerConfig().CAConfig.Cfg.Affiliations.AllowRemove).To(Equal(true)) + }) + By("enabling ignore cert expiry for re-enroll", func() { + Expect(*defaultConfig.GetServerConfig().CAConfig.CA.ReenrollIgnoreCertExpiry).To(Equal(true)) + }) + }) + + It("does not crash if CSP is nil in override config", func() { + override := &v1.ServerConfig{ + CAConfig: v1.CAConfig{ + CSP: nil, + }, + } + + err := ca.OverrideServerConfig(override) + Expect(err).NotTo(HaveOccurred()) + + By("setting in defaults when using pkcs11", func() { + Expect(defaultConfig.GetServerConfig().CAConfig.CSP).To(Equal(ca.GetServerConfig().CAConfig.CSP)) + Expect(defaultConfig.GetServerConfig().CAConfig.CSR.CN).To(Equal("ca_test")) + }) + }) + + It("overrides config", func() { + override := &v1.ServerConfig{ + CAConfig: v1.CAConfig{ + CSP: &v1.BCCSP{ + ProviderName: "PKCS11", + }, + }, + } + defaultConfig.UsingPKCS11Returns(true) + + err := ca.OverrideServerConfig(override) + Expect(err).NotTo(HaveOccurred()) + + By("setting in defaults when using pkcs11", func() { + Expect(defaultConfig.GetServerConfig().CAConfig.CSP.PKCS11.Library).To(Equal("/usr/local/lib/libpkcs11-proxy.so")) + Expect(defaultConfig.GetServerConfig().CAConfig.CSP.PKCS11.FileKeyStore.KeyStorePath).To(Equal("msp/keystore")) + Expect(defaultConfig.GetServerConfig().CAConfig.CSP.PKCS11.HashFamily).To(Equal("SHA2")) + Expect(defaultConfig.GetServerConfig().CAConfig.CSP.PKCS11.SecLevel).To(Equal(256)) + 
Expect(defaultConfig.GetServerConfig().CAConfig.CSR.CN).To(Equal("ca_test")) + }) + }) + + It("successfully completes initializing intermediate ca", func() { + override := &v1.ServerConfig{ + CAConfig: v1.CAConfig{ + Intermediate: v1.IntermediateCA{ + ParentServer: v1.ParentServer{ + URL: "127.0.0.1", + }, + }, + }, + } + + err := ca.OverrideServerConfig(override) + Expect(err).NotTo(HaveOccurred()) + + By("setting cn in csr to be empty", func() { + Expect(defaultConfig.GetServerConfig().CAConfig.CSR.CN).To(Equal("")) + }) + }) + + It("writes configuration to file", func() { + err := ca.WriteConfig() + Expect(err).NotTo(HaveOccurred()) + Expect(filepath.Join(defaultConfig.GetHomeDir(), "fabric-ca-server-config.yaml")).Should(BeAnExistingFile()) + + ca.RemoveHomeDir() + }) + + Context("run fabric-ca init", func() { + BeforeEach(func() { + cfg, err := initializer.LoadConfigFromFile("../../../defaultconfig/ca/ca.yaml") + Expect(err).NotTo(HaveOccurred()) + defaultConfig.GetServerConfigReturns(cfg) + + err = ca.WriteConfig() + Expect(err).NotTo(HaveOccurred()) + }) + + It("successfully completes Initializing ca", func() { + err := ca.Init() + Expect(err).NotTo(HaveOccurred()) + + By("setting ca files property to point to tls ca config file", func() { + Expect(defaultConfig.GetServerConfig().CAfiles).To(Equal([]string{"/data/tlsca/fabric-ca-server-config.yaml"})) + }) + + By("setting cert/key file to generate location", func() { + Expect(defaultConfig.GetServerConfig().CA.Certfile).To(ContainSubstring(filepath.Join(ca.Config.GetHomeDir(), "ca-cert.pem"))) + Expect(defaultConfig.GetServerConfig().CA.Keyfile).To(ContainSubstring(filepath.Join(ca.Config.GetHomeDir(), "ca-key.pem"))) + }) + + }) + + AfterEach(func() { + err := ca.RemoveHomeDir() + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("viper unmarshal", func() { + It("returns an error if fails to find file", func() { + _, err := ca.ViperUnmarshal("../../../defaultconfig/ca/foo.yaml") + Expect(err).To(HaveOccurred()) + }) + + It("successfully unmarshals", func() { + cfg, err := ca.ViperUnmarshal("../../../defaultconfig/ca/tlsca.yaml") + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + }) + }) + + Context("parse enrollment ca crypto", func() { + BeforeEach(func() { + defaultConfig.GetServerConfig().TLS = v1.ServerTLSConfig{ + Enabled: pointer.True(), + CertFile: "../../../testdata/tls/tls.crt", + KeyFile: "../../../testdata/tls/tls.key", + } + + defaultConfig.GetServerConfig().Operations = v1.Options{ + TLS: v1.TLS{ + Enabled: pointer.True(), + }, + } + }) + + It("returns an error if TLS cert and key not provided", func() { + defaultConfig.GetServerConfig().TLS.CertFile = "" + defaultConfig.GetServerConfig().TLS.KeyFile = "" + + _, err := ca.ParseEnrollmentCACrypto() + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("no TLS cert and key file provided")) + }) + + It("returns an error is parsing ca blocks fails", func() { + msg := "failed ca parse" + defaultConfig.ParseCABlockReturns(nil, errors.New(msg)) + + _, err := ca.ParseEnrollmentCACrypto() + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to parse ca block: " + msg)) + }) + + It("returns an error is parsing TLS blocks fails", func() { + msg := "failed tls parse" + defaultConfig.ParseTLSBlockReturns(nil, errors.New(msg)) + + _, err := ca.ParseEnrollmentCACrypto() + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to parse tls block: " + msg)) + }) + + It("returns an error is parsing DB blocks fails", 
func() { + msg := "failed db parse" + defaultConfig.ParseDBBlockReturns(nil, errors.New(msg)) + + _, err := ca.ParseEnrollmentCACrypto() + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to parse db block: " + msg)) + }) + + It("returns an error is parsing operations blocks fails", func() { + msg := "failed operations parse" + defaultConfig.ParseOperationsBlockReturns(nil, errors.New(msg)) + + _, err := ca.ParseEnrollmentCACrypto() + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to parse operations block: " + msg)) + }) + + It("returns an error is parsing intermediate blocks fails", func() { + msg := "failed operations parse" + defaultConfig.ParseIntermediateBlockReturns(nil, errors.New(msg)) + + _, err := ca.ParseEnrollmentCACrypto() + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to parse intermediate block: " + msg)) + }) + + It("sets the operations TLS path to be equal to server's TLS path", func() { + _, err := ca.ParseEnrollmentCACrypto() + Expect(err).NotTo(HaveOccurred()) + + Expect(defaultConfig.GetServerConfig().Operations.TLS.CertFile).To(ContainSubstring("tls/tls.crt")) + Expect(defaultConfig.GetServerConfig().Operations.TLS.KeyFile).To(ContainSubstring("tls/tls.key")) + }) + }) + + Context("parse TLS ca crypto", func() { + It("returns an error is parsing ca blocks fails", func() { + msg := "failed ca parse" + defaultConfig.ParseCABlockReturns(nil, errors.New(msg)) + + _, err := ca.ParseTLSCACrypto() + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to parse ca block: " + msg)) + }) + + It("parses ca blocks fails", func() { + _, err := ca.ParseTLSCACrypto() + Expect(err).NotTo(HaveOccurred()) + }) + }) + }) +}) diff --git a/pkg/initializer/ca/config/ca.go b/pkg/initializer/ca/config/ca.go new file mode 100644 index 00000000..c5516688 --- /dev/null +++ b/pkg/initializer/ca/config/ca.go @@ -0,0 +1,71 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package config + +import ( + "path/filepath" +) + +func (c *Config) ParseCABlock() (map[string][]byte, error) { + log.Info("Parsing CA block") + + if c.caCrypto == nil { + c.caCrypto = map[string][]byte{} + } + + certFile := c.ServerConfig.CAConfig.CA.Certfile + keyFile := c.ServerConfig.CAConfig.CA.Keyfile + + if certFile == "" && keyFile == "" { + return nil, nil + } + + err := c.HandleCertInput(certFile, "cert.pem", c.caCrypto) + if err != nil { + return nil, err + } + c.ServerConfig.CAConfig.CA.Certfile = filepath.Join(c.HomeDir, "cert.pem") + + err = c.HandleKeyInput(keyFile, "key.pem", c.caCrypto) + if err != nil { + return nil, err + } + c.ServerConfig.CAConfig.CA.Keyfile = filepath.Join(c.HomeDir, "key.pem") + + chainFile := c.ServerConfig.CAConfig.CA.Chainfile + if chainFile != "" { + err := c.HandleCertInput(chainFile, "chain.pem", c.caCrypto) + if err != nil { + return nil, err + } + c.ServerConfig.CAConfig.CA.Chainfile = filepath.Join(c.HomeDir, "chain.pem") + } + + return c.caCrypto, nil +} + +func (c *Config) CAMountPath() { + c.ServerConfig.CAConfig.CA.Keyfile = filepath.Join(c.MountPath, "key.pem") + c.ServerConfig.CAConfig.CA.Certfile = filepath.Join(c.MountPath, "cert.pem") + + chainFile := c.ServerConfig.CAConfig.CA.Chainfile + if chainFile != "" { + c.ServerConfig.CAConfig.CA.Chainfile = filepath.Join(c.MountPath, "chain.pem") + } +} diff --git a/pkg/initializer/ca/config/ca_test.go b/pkg/initializer/ca/config/ca_test.go new file mode 100644 index 00000000..3561eb64 --- /dev/null +++ b/pkg/initializer/ca/config/ca_test.go @@ -0,0 +1,114 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package config_test + +import ( + "os" + "path/filepath" + + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/ca/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca/config" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("CA config", func() { + var ( + cfg *config.Config + homeDir = "caconfigtest" + ) + + BeforeEach(func() { + os.Mkdir(homeDir, 0777) + }) + + AfterEach(func() { + err := os.RemoveAll(homeDir) + Expect(err).NotTo(HaveOccurred()) + }) + + Context("parses CA configuration", func() { + BeforeEach(func() { + cfg = &config.Config{ + ServerConfig: &v1.ServerConfig{ + CAConfig: v1.CAConfig{ + CA: v1.CAInfo{ + Certfile: certFile, + Keyfile: keyFile, + Chainfile: certFile, + }, + }, + }, + HomeDir: homeDir, + } + }) + + It("returns an error if unexpected type passed for keyfile and no key found in keystore", func() { + cfg.HomeDir = "fake" + cfg.ServerConfig.CAConfig.CA.Keyfile = "invalidType" + _, err := cfg.ParseCABlock() + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("no such file or directory")) + os.RemoveAll(cfg.HomeDir) + }) + + It("if key is unexpected type look in keystore folder for key", func() { + cfg.HomeDir = "../../../../testdata" + cfg.ServerConfig.CAConfig.CA.Keyfile = "invalidType" + crypto, err := cfg.ParseCABlock() + Expect(err).NotTo(HaveOccurred()) + + keyData, keyKeyExists := crypto["key.pem"] + Expect(keyKeyExists).To(Equal(true)) + Expect(keyData).NotTo(BeNil()) + Expect(cfg.ServerConfig.CAConfig.CA.Keyfile).To(Equal(filepath.Join(cfg.HomeDir, "key.pem"))) + + os.Remove(filepath.Join(cfg.HomeDir, "cert.pem")) + os.Remove(filepath.Join(cfg.HomeDir, "key.pem")) + os.Remove(filepath.Join(cfg.HomeDir, "chain.pem")) + }) + + It("returns if unexpected type passed for trusted root cert files", func() { + cfg.ServerConfig.CAConfig.CA.Chainfile = "invalidType" + c, err := cfg.ParseCABlock() + Expect(err).NotTo(HaveOccurred()) + Expect(c).NotTo(BeNil()) + }) + + It("parses config and returns a map containing all crypto and updated paths to crypto material", func() { + crypto, err := cfg.ParseCABlock() + Expect(err).NotTo(HaveOccurred()) + + certData, certKeyExists := crypto["cert.pem"] + Expect(certKeyExists).To(Equal(true)) + Expect(certData).NotTo(BeNil()) + Expect(cfg.ServerConfig.CAConfig.CA.Certfile).To(Equal(filepath.Join(cfg.HomeDir, "cert.pem"))) + + keyData, keyKeyExists := crypto["key.pem"] + Expect(keyKeyExists).To(Equal(true)) + Expect(keyData).NotTo(BeNil()) + Expect(cfg.ServerConfig.CAConfig.CA.Keyfile).To(Equal(filepath.Join(cfg.HomeDir, "key.pem"))) + + chainData, chainKeyExists := crypto["chain.pem"] + Expect(chainKeyExists).To(Equal(true)) + Expect(chainData).NotTo(BeNil()) + Expect(cfg.ServerConfig.CAConfig.CA.Chainfile).To(Equal(filepath.Join(cfg.HomeDir, "chain.pem"))) + }) + }) +}) diff --git a/pkg/initializer/ca/config/config.go b/pkg/initializer/ca/config/config.go new file mode 100644 index 00000000..3faeb6b6 --- /dev/null +++ b/pkg/initializer/ca/config/config.go @@ -0,0 +1,364 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package config + +import ( + "crypto/x509" + "encoding/pem" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "strings" + + "github.com/pkg/errors" + + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/ca/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/yaml" +) + +type Type string + +const ( + EnrollmentCA Type = "enrollment" + TLSCA Type = "tls" +) + +func (t Type) Is(typ Type) bool { + return t == typ +} + +type InputType string + +var ( + File InputType = "File" + Pem InputType = "Pem" + Base64 InputType = "Base64" + Bccsp InputType = "Bccsp" +) + +var log = logf.Log.WithName("initializer_config") + +type Config struct { + ServerConfig *v1.ServerConfig + HomeDir string + MountPath string + Update bool + SqlitePath string + + tlsCrypto map[string][]byte + dbCrypto map[string][]byte + caCrypto map[string][]byte + operationsCrypto map[string][]byte + intermediateCrypto map[string][]byte +} + +func (c *Config) GetServerConfig() *v1.ServerConfig { + return c.ServerConfig +} + +func (c *Config) GetHomeDir() string { + return c.HomeDir +} + +func (c *Config) GetTLSCrypto() map[string][]byte { + return c.tlsCrypto +} + +func (c *Config) HandleCertInput(input, location string, store map[string][]byte) error { + var err error + inputType := GetInputType(input) + + log.Info(fmt.Sprintf("Handling input of cert type '%s', to be stored at '%s'", inputType, location)) + + data := []byte{} + switch inputType { + case Pem: + data = util.PemStringToBytes(input) + err = c.StoreInMap(data, location, store) + if err != nil { + return err + } + case File: + // On an update of config overrides, file is not a valid override value as the operator + // won't have access to it. Cert can only be passed as base64. + if !c.Update { + data, err = util.FileToBytes(input) + if err != nil { + return err + } + err = c.StoreInMap(data, location, store) + if err != nil { + return err + } + } + case Base64: + data, err = util.Base64ToBytes(input) + if err != nil { + return err + } + err = c.StoreInMap(data, location, store) + if err != nil { + return err + } + case Bccsp: + return nil + default: + return errors.Errorf("invalid input type: %s", input) + } + + if len(data) != 0 { + err := c.EnsureDirAndWriteFile(location, data) + if err != nil { + return err + } + } + + return nil +} + +func (c *Config) EnsureDirAndWriteFile(location string, data []byte) error { + path := filepath.Join(c.HomeDir, location) + err := util.EnsureDir(filepath.Dir(path)) + if err != nil { + return err + } + + err = ioutil.WriteFile(filepath.Clean(path), data, 0600) + if err != nil { + return err + } + + return nil +} + +func (c *Config) HandleKeyInput(input, location string, store map[string][]byte) error { + var err error + + inputType := GetInputType(input) + + log.Info(fmt.Sprintf("Handling input of key type '%s', to be stored at '%s'", inputType, location)) + + data := []byte{} + switch inputType { + case Pem: + data = util.PemStringToBytes(input) + err = c.StoreInMap(data, location, store) + if err != nil { + return err + } + case File: + // On an update of config overrides, file is not a valid override value as the operator + // won't have access to it. Key can only be passed as base64. 
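+ // When c.Update is true the file path is deliberately ignored: data stays empty,
+ // so nothing is stored or written below. This mirrors the File case in
+ // HandleCertInput above.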
+ if !c.Update { + data, err = util.FileToBytes(input) + if err != nil { + return err + } + err = c.StoreInMap(data, location, store) + if err != nil { + return err + } + } + case Base64: + data, err = util.Base64ToBytes(input) + if err != nil { + return err + } + err = c.StoreInMap(data, location, store) + if err != nil { + return err + } + case Bccsp: + // If HSM enabled, don't try to read key from file system + if c.UsingPKCS11() { + return nil + } + // On an update of config overrides, reading from keystore is not valid. After init create + // the key stored in a kubernetes secret and operator won't have access to it. + if !c.Update { + data, err = c.GetSigningKey(c.HomeDir) + if err != nil { + return err + } + err = c.StoreInMap(data, location, store) + if err != nil { + return err + } + } + default: + return errors.Errorf("invalid input type: %s", input) + } + + if len(data) != 0 { + err := c.EnsureDirAndWriteFile(location, data) + if err != nil { + return err + } + } + + return nil +} + +func (c *Config) StoreInMap(data []byte, location string, store map[string][]byte) error { + if len(data) == 0 { + return nil + } + + key := ConvertStringForSecrets(location, true) + store[key] = data + return nil +} + +// GetSigningKey applies to non-hsm use cases where the key exists on the filesystem. +// The filesystem is read and then key is then stored in a kubernetes secret. +func (c *Config) GetSigningKey(path string) ([]byte, error) { + + keystoreDir := filepath.Join(path, "msp", "keystore") + files, err := ioutil.ReadDir(keystoreDir) + if err != nil { + return nil, err + } + + if len(files) == 0 { + return nil, fmt.Errorf("no keys found in keystore directory: %s", keystoreDir) + } + + // Need this loop to find appropriate key. Three files are generated + // by default by the CA: IssuerRevocationPrivateKey, IssuerSecretKey, and *_sk + // We are only interested in file ending with 'sk' which the is Private Key + // associated with the x509 certificate + for _, file := range files { + fileBytes, err := ioutil.ReadFile(filepath.Clean(filepath.Join(keystoreDir, file.Name()))) + if err != nil { + return nil, err + } + + block, _ := pem.Decode(fileBytes) + if block == nil { + continue + } + + _, err = x509.ParsePKCS8PrivateKey(block.Bytes) + if err == nil { + return fileBytes, nil + } + } + + return nil, errors.Errorf("failed to parse CA's private key") +} + +func (c *Config) SetUpdate(update bool) { + c.Update = update +} + +func (c *Config) SetServerConfig(cfg *v1.ServerConfig) { + c.ServerConfig = cfg +} + +func (c *Config) SetMountPaths(caType Type) { + switch caType { + case EnrollmentCA: + c.CAMountPath() + c.DBMountPath() + c.IntermediateMountPath() + c.OperationsMountPath() + c.TLSMountPath() + case TLSCA: + c.CAMountPath() + c.DBMountPath() + } +} + +func (c *Config) UsingPKCS11() bool { + if c.ServerConfig != nil && c.ServerConfig.CAConfig.CSP != nil { + if strings.ToLower(c.ServerConfig.CAConfig.CSP.ProviderName) == "pkcs11" { + return true + } + } + return false +} + +func GetInputType(input string) InputType { + data := []byte(input) + block, _ := pem.Decode(data) + if block != nil { + return Pem + } + + data, err := util.Base64ToBytes(input) + if err == nil && data != nil { + return Base64 + } + + // If input string is found as an already exisiting file, return CertFile type + _, err = os.Stat(input) + if err == nil { + return File + } + + return Bccsp +} + +func ConvertStringForSecrets(filepath string, forward bool) string { + // shared//tlsca//db/certs/certfile0.pem + if forward { 
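+ // e.g. "shared/tlsca/db/certs/certfile0.pem" -> "shared_tlsca_db_certs_certfile0.pem"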
+ return strings.Replace(filepath, "/", "_", -1) + } + // data[shared__tlsca__db_certs_certfile0.pem + return strings.Replace(filepath, "_", "/", -1) +} + +func IsValidPostgressDatasource(datasourceStr string) bool { + regexpssions := []string{`host=\S+`, `port=\d+`, `user=\S+`, `password=\S+`, `dbname=\S+`, `sslmode=\S+`} + for _, regexpression := range regexpssions { + re := regexp.MustCompile(regexpression) + matches := len(re.FindStringSubmatch(datasourceStr)) + if matches == 0 { + return false + } + } + return true +} + +func ValidCryptoInput(certFile, keyFile string) error { + if certFile == "" && keyFile != "" { + return errors.New("Key file specified but no corresponding certificate file specified, both must be passed") + } + if certFile != "" && keyFile == "" { + return errors.New("Certificate file specified but no corresponding key file specified, both must be passed") + } + return nil +} + +func ReadFrom(from *[]byte) (*Config, error) { + config := &v1.ServerConfig{} + err := yaml.Unmarshal(*from, config) + if err != nil { + return nil, err + } + + return &Config{ + ServerConfig: config, + }, nil +} diff --git a/pkg/initializer/ca/config/config_suite_test.go b/pkg/initializer/ca/config/config_suite_test.go new file mode 100644 index 00000000..eff0b39f --- /dev/null +++ b/pkg/initializer/ca/config/config_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package config_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestConfig(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Config Suite") +} diff --git a/pkg/initializer/ca/config/config_test.go b/pkg/initializer/ca/config/config_test.go new file mode 100644 index 00000000..8ceb46ed --- /dev/null +++ b/pkg/initializer/ca/config/config_test.go @@ -0,0 +1,112 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package config_test + +import ( + "os" + + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/ca/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca/config" + "github.com/IBM-Blockchain/fabric-operator/pkg/util/pointer" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +const ( + keyFile = "LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb2dJQkFBS0NBUUVBdFJBUDlMemUyZEc1cm1rbmcvdVVtREFZU0VwUElqRFdUUDhqUjMxcUJ5Yjc3YWUrCnk3UTRvRnZod1lDVUhsUWVTWjFKeTdUUHpEcitoUk5hdDJYNGdGYUpGYmVFbC9DSHJ3Rk1mNzNzQStWV1pHdnkKdXhtbjB2bEdYMW5zSEo5aUdIUS9qR2FvV1FJYzlVbnpHWi8yWStlZkpxOWd3cDBNemFzWWZkdXordXVBNlp4VAp5TTdDOWFlWmxYL2ZMYmVkSXVXTzVzaXhPSlZQeUVpcWpkd0RiY1AxYy9mRCtSMm1DbmM3VGovSnVLK1poTGxPCnhGcVlFRmtROHBmSi9LY1pabVF1QURZVFh6RGp6OENxcTRTRU5ySzI0b2hQQkN2SGgyanplWjhGdGR4MmpSSFQKaXdCZWZEYWlSWVBSOUM4enk4K1Z2Wmt6S0hQV3N5aENiNUMrN1FJREFRQUJBb0lCQUZROGhzL2IxdW9Mc3BFOApCdEJXaVVsTWh0K0xBc25yWXFncnd5UU5hdmlzNEdRdXVJdFk2MGRmdCtZb2hjQ2ViZ0RkbG1tWlUxdTJ6cGJtCjdEdUt5MVFaN21rV0dpLytEWUlUM3AxSHBMZ2pTRkFzRUorUFRnN1BQamc2UTZrRlZjUCt3Vm4yb0xmWVRkU28KZE5zbEdxSmNMaVQzVHRMNzhlcjFnTTE5RzN6T3J1ZndrSGJSYU1BRmtvZ1ExUlZLSWpnVGUvbmpIMHFHNW9JagoxNEJLeFFKTUZFTG1pQk50NUx5OVMxWWdxTDRjbmNtUDN5L1QyNEdodVhNckx0eTVOeVhnS0dFZ1pUTDMzZzZvCnYreDFFMFRURWRjMVQvWVBGWkdBSXhHdWRKNWZZZ2JtWU9LZ09mUHZFOE9TbEV6OW56aHNnckVZYjdQVThpZDUKTHFycVJRRUNnWUVBNjIyT3RIUmMxaVY1ZXQxdHQydTVTTTlTS2h2b0lPT3d2Q3NnTEI5dDJzNEhRUlRYN0RXcAo0VDNpUC9leEl5OXI3bTIxNFo5MEgzZlpVNElSUkdHSUxKUVMrYzRQNVA4cHJFTDcyd1dIWlpQTTM3QlZTQ1U3CkxOTXl4TkRjeVdjSUJIVFh4NUY2eXhLNVFXWTg5MVB0eDlDamJFSEcrNVJVdDA4UVlMWDlUQTBDZ1lFQXhPSmYKcXFjeThMOVZyYUFVZG9lbGdIU0NGSkJRR3hMRFNSQlJSTkRIOUJhaWlZOCtwZzd2TExTRXFMRFpsbkZPbFkrQQpiRENEQ0RtdHhwRXViY0x6b3FnOXhlQTZ0eXZZWkNWalY5dXVzNVh1Wmk1VDBBUHhCdm56OHNNa3dRY3RQWkRQCk8zQTN4WllkZzJBRmFrV1BmT1FFbjVaK3F4TU13SG9VZ1ZwQkptRUNnWUJ2Q2FjcTJVOEgrWGpJU0ROOU5TT1kKZ1ovaEdIUnRQcmFXcVVodFJ3MkxDMjFFZHM0NExEOUphdVNSQXdQYThuelhZWXROTk9XU0NmYkllaW9tdEZHRApwUHNtTXRnd1MyQ2VUS0Y0OWF5Y2JnOU0yVi8vdlAraDdxS2RUVjAwNkpGUmVNSms3K3FZYU9aVFFDTTFDN0swCmNXVUNwQ3R6Y014Y0FNQmF2THNRNlFLQmdHbXJMYmxEdjUxaXM3TmFKV0Z3Y0MwL1dzbDZvdVBFOERiNG9RV1UKSUowcXdOV2ZvZm95TGNBS3F1QjIrbkU2SXZrMmFiQ25ZTXc3V0w4b0VJa3NodUtYOVgrTVZ6Y1VPekdVdDNyaQpGeU9mcHJJRXowcm5zcWNSNUJJNUZqTGJqVFpyMEMyUWp2NW5FVFAvaHlpQWFRQ1l5THAyWlVtZ0Vjb0VPNWtwClBhcEJBb0dBZVV0WjE0SVp2cVorQnAxR1VqSG9PR0pQVnlJdzhSRUFETjRhZXRJTUlQRWFVaDdjZUtWdVN6VXMKci9WczA1Zjg0cFBVaStuUTUzaGo2ZFhhYTd1UE1aMFBnNFY4cS9UdzJMZ3BWWndVd0ltZUQrcXNsbldha3VWMQpMSnp3SkhOa3pOWE1OMmJWREFZTndSamNRSmhtbzF0V2xHYlpRQjNoSkEwR2thWGZPa2c9Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==" + certFile = "../../../../testdata/tls/tls.crt" +) + +var _ = Describe("config", func() { + const ( + homeDir = "configtest" + ) + + var cfg *config.Config + + BeforeEach(func() { + cfg = &config.Config{ + ServerConfig: &v1.ServerConfig{ + TLS: v1.ServerTLSConfig{ + Enabled: pointer.True(), + CertFile: certFile, + KeyFile: keyFile, + ClientAuth: v1.ClientAuth{ + CertFiles: []string{"../../../../testdata/tls/tls.crt"}, + }, + }, + }, + HomeDir: homeDir, + } + }) + + BeforeEach(func() { + os.Mkdir(homeDir, 0777) + }) + + AfterEach(func() { + err := os.RemoveAll(homeDir) + Expect(err).NotTo(HaveOccurred()) + }) + + Context("get input type", func() { + It("returns base64 type if filepath passed", func() { + inputType := config.GetInputType(keyFile) + Expect(inputType).To(Equal(config.Base64)) + }) + + It("returns cert file type if filepath passed", func() { + inputType := config.GetInputType(certFile) + Expect(inputType).To(Equal(config.File)) + }) + + It("returns unkown type if neither base64 or file passed in", func() { + inputType := config.GetInputType("foo") + Expect(inputType).To(Equal(config.Bccsp)) + }) + }) + + Context("handle configuration", func() { + var crypto map[string][]byte + + BeforeEach(func() { + crypto = map[string][]byte{} + 
		})
+
+		It("will convert cert to bytes and store in map", func() {
+			err := cfg.HandleCertInput(certFile, "certname", crypto)
+			Expect(err).NotTo(HaveOccurred())
+			Expect(crypto).NotTo(BeNil())
+
+			data, keyExists := crypto["certname"]
+			Expect(keyExists).To(Equal(true))
+			Expect(data).NotTo(BeNil())
+		})
+
+		It("will convert key to bytes and store in map", func() {
+			err := cfg.HandleKeyInput(keyFile, "keyname", crypto)
+			Expect(err).NotTo(HaveOccurred())
+			Expect(crypto).NotTo(BeNil())
+
+			data, keyExists := crypto["keyname"]
+			Expect(keyExists).To(Equal(true))
+			Expect(data).NotTo(BeNil())
+		})
+	})
+})
diff --git a/pkg/initializer/ca/config/db.go b/pkg/initializer/ca/config/db.go
new file mode 100644
index 00000000..de78e207
--- /dev/null
+++ b/pkg/initializer/ca/config/db.go
@@ -0,0 +1,135 @@
+/*
+ * Copyright contributors to the Hyperledger Fabric Operator project
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package config
+
+import (
+	"fmt"
+	"path/filepath"
+	"strings"
+
+	"github.com/IBM-Blockchain/fabric-operator/pkg/util"
+	"github.com/pkg/errors"
+)
+
+type DBType string
+
+var (
+	SQLLite  DBType = "sqlite3"
+	Postgres DBType = "postgres"
+	MySQL    DBType = "mysql"
+)
+
+func (c *Config) ParseDBBlock() (map[string][]byte, error) {
+	dbType := c.ServerConfig.CAConfig.DB.Type
+
+	// Default to sqlite
+	if dbType == "" {
+		dbType = "sqlite3"
+	}
+
+	switch DBType(strings.ToLower(dbType)) {
+	case SQLLite:
+		// The SQLite database generated by the operator during initialization is temporary.
+		// The purpose of initialization is to generate crypto, not to persist user data.
+		// Using a temporary path suffices for sqlite-based initialization.
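+		// During init the datasource points at a throwaway path under /tmp (set
+		// just below); DBMountPath further down rewrites it to the runtime
+		// location, SqlitePath when set or /data/db/ca.db otherwise.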
+ c.ServerConfig.CAConfig.DB.Datasource = "/tmp/db/ca.db" + err := util.EnsureDir(filepath.Dir(c.ServerConfig.CAConfig.DB.Datasource)) + if err != nil { + return nil, err + } + + return nil, nil + case Postgres: + if !c.ServerConfig.CAConfig.DB.TLS.IsEnabled() { + return nil, nil + } + + datasource := c.ServerConfig.CAConfig.DB.Datasource + if datasource == "" { + return nil, errors.Errorf("no datasource string specified for postgres") + } + + if !IsValidPostgressDatasource(datasource) { + return nil, errors.Errorf("datasource for postgres is not valid") + } + + if c.dbCrypto == nil { + c.dbCrypto = map[string][]byte{} + } + + log.Info("Parsing DB block for Postgres database") + certFiles := c.ServerConfig.CAConfig.DB.TLS.CertFiles + for index, certFile := range certFiles { + err := c.HandleCertInput(certFile, fmt.Sprintf("db-certfile%d.pem", index), c.dbCrypto) + if err != nil { + return nil, err + } + certFiles[index] = filepath.Join(c.HomeDir, fmt.Sprintf("db-certfile%d.pem", index)) + } + c.ServerConfig.CAConfig.DB.TLS.CertFiles = certFiles + + certFile := c.ServerConfig.CAConfig.DB.TLS.Client.CertFile + keyFile := c.ServerConfig.CAConfig.DB.TLS.Client.KeyFile + if certFile != "" && keyFile != "" { + log.Info("Client authentication information provided for database connection") + err := c.HandleCertInput(certFile, "db-cert.pem", c.dbCrypto) + if err != nil { + return nil, err + } + c.ServerConfig.CAConfig.DB.TLS.Client.CertFile = filepath.Join(c.HomeDir, "db-cert.pem") + + err = c.HandleKeyInput(keyFile, "db-key.pem", c.dbCrypto) + if err != nil { + return nil, err + } + c.ServerConfig.CAConfig.DB.TLS.Client.KeyFile = filepath.Join(c.HomeDir, "db-key.pem") + } + + return c.dbCrypto, nil + case MySQL: + return nil, errors.New("MySQL is not supported") + } + + return nil, errors.Errorf("database type '%s' is not supported", dbType) +} + +func (c *Config) DBMountPath() { + certFile := c.ServerConfig.CAConfig.DB.TLS.Client.CertFile + keyFile := c.ServerConfig.CAConfig.DB.TLS.Client.KeyFile + + if certFile != "" && keyFile != "" { + c.ServerConfig.CAConfig.DB.TLS.Client.CertFile = filepath.Join(c.MountPath, "db-cert.pem") + c.ServerConfig.CAConfig.DB.TLS.Client.KeyFile = filepath.Join(c.MountPath, "db-key.pem") + } + + certFiles := c.ServerConfig.CAConfig.DB.TLS.CertFiles + for index, _ := range certFiles { + certFiles[index] = filepath.Join(c.MountPath, fmt.Sprintf("db-certfile%d.pem", index)) + } + c.ServerConfig.CAConfig.DB.TLS.CertFiles = certFiles + + dbType := c.ServerConfig.CAConfig.DB.Type + if DBType(strings.ToLower(dbType)) == SQLLite { + if c.SqlitePath != "" { + c.ServerConfig.CAConfig.DB.Datasource = c.SqlitePath + } else { + c.ServerConfig.CAConfig.DB.Datasource = "/data/db/ca.db" + } + } +} diff --git a/pkg/initializer/ca/config/db_test.go b/pkg/initializer/ca/config/db_test.go new file mode 100644 index 00000000..9a6c1533 --- /dev/null +++ b/pkg/initializer/ca/config/db_test.go @@ -0,0 +1,136 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package config_test + +import ( + "os" + "path/filepath" + + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/ca/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca/config" + "github.com/IBM-Blockchain/fabric-operator/pkg/util/pointer" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("DB config", func() { + const ( + homeDir = "homedir" + ) + + BeforeEach(func() { + os.Mkdir(homeDir, 0777) + }) + + AfterEach(func() { + err := os.RemoveAll(homeDir) + Expect(err).NotTo(HaveOccurred()) + }) + + var cfg *config.Config + + Context("parses DB configuration", func() { + BeforeEach(func() { + cfg = &config.Config{ + ServerConfig: &v1.ServerConfig{ + CAConfig: v1.CAConfig{ + DB: &v1.CAConfigDB{ + Type: string(config.Postgres), + Datasource: "host=0.0.0.0 port=8080 user=db password=db dbname=fabric sslmode=true", + TLS: v1.ClientTLSConfig{ + Enabled: pointer.True(), + CertFiles: []string{"../../../../testdata/tls/tls.crt"}, + Client: v1.KeyCertFiles{ + CertFile: certFile, + KeyFile: keyFile, + }, + }, + }, + }, + }, + HomeDir: homeDir, + SqlitePath: "/tmp/ca.db", + } + }) + + It("returns an error if invalid database type specified", func() { + cfg.ServerConfig.CAConfig.DB.Type = "couchdb" + _, err := cfg.ParseDBBlock() + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("database type 'couchdb' is not supported")) + }) + + It("returns an error if mysql database type specified", func() { + cfg.ServerConfig.CAConfig.DB.Type = string(config.MySQL) + _, err := cfg.ParseDBBlock() + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("MySQL is not supported")) + }) + + It("returns no error and an empty map if TLS disabled", func() { + cfg.ServerConfig.CAConfig.DB.TLS.Enabled = pointer.False() + crypto, err := cfg.ParseDBBlock() + Expect(err).NotTo(HaveOccurred()) + Expect(crypto).To(BeNil()) + }) + + It("returns an error if missing datasource", func() { + cfg.ServerConfig.CAConfig.DB.Datasource = "" + _, err := cfg.ParseDBBlock() + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("no datasource string specified for postgres")) + }) + + It("returns an error if datasource is unexpected format", func() { + cfg.ServerConfig.CAConfig.DB.Datasource = "dbname=testdb" + _, err := cfg.ParseDBBlock() + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("datasource for postgres is not valid")) + }) + + It("parses config and returns a map containing all db crypto and updated paths to crypto material", func() { + crypto, err := cfg.ParseDBBlock() + Expect(err).NotTo(HaveOccurred()) + + certData, certKeyExists := crypto["db-cert.pem"] + Expect(certKeyExists).To(Equal(true)) + Expect(certData).NotTo(BeNil()) + Expect(cfg.ServerConfig.CAConfig.DB.TLS.Client.CertFile).To(Equal(filepath.Join(cfg.HomeDir, "db-cert.pem"))) + + keyData, keyKeyExists := crypto["db-key.pem"] + Expect(keyKeyExists).To(Equal(true)) + Expect(keyData).NotTo(BeNil()) + Expect(cfg.ServerConfig.CAConfig.DB.TLS.Client.KeyFile).To(Equal(filepath.Join(cfg.HomeDir, "db-key.pem"))) + + clientAuthData, clientAuthCertKeyExists := crypto["db-certfile0.pem"] + Expect(clientAuthCertKeyExists).To(Equal(true)) + Expect(clientAuthData).NotTo(BeNil()) + Expect(cfg.ServerConfig.CAConfig.DB.TLS.CertFiles[0]).To(Equal(filepath.Join(cfg.HomeDir, "db-certfile0.pem"))) + }) + + It("creates SQLLite database and returns empty crypto map", func() { + 
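// Illustrative sketch of the datasource check performed by
// IsValidPostgressDatasource: every key=value pair below must be present for a
// Postgres datasource to be accepted. The regular expressions mirror the ones
// in config.go; validPostgresDatasource is a hypothetical stand-in name, and
// the sample strings match the fixtures used in the tests above.
package main

import (
	"fmt"
	"regexp"
)

func validPostgresDatasource(ds string) bool {
	for _, expr := range []string{`host=\S+`, `port=\d+`, `user=\S+`, `password=\S+`, `dbname=\S+`, `sslmode=\S+`} {
		if !regexp.MustCompile(expr).MatchString(ds) {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(validPostgresDatasource("host=0.0.0.0 port=8080 user=db password=db dbname=fabric sslmode=true")) // true
	fmt.Println(validPostgresDatasource("dbname=testdb"))                                                          // false, as asserted in the test above
}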
cfg.ServerConfig.CAConfig.DB.Type = string(config.SQLLite) + crypto, err := cfg.ParseDBBlock() + Expect(err).NotTo(HaveOccurred()) + Expect(crypto).To(BeNil()) + + os.RemoveAll("dbconfigtest") + }) + }) +}) diff --git a/pkg/initializer/ca/config/intermediate.go b/pkg/initializer/ca/config/intermediate.go new file mode 100644 index 00000000..3f7067b4 --- /dev/null +++ b/pkg/initializer/ca/config/intermediate.go @@ -0,0 +1,76 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package config + +import ( + "fmt" + "path/filepath" +) + +func (c *Config) ParseIntermediateBlock() (map[string][]byte, error) { + if c.intermediateCrypto == nil { + c.intermediateCrypto = map[string][]byte{} + } + + log.Info("Parsing Intermediate block") + certFiles := c.ServerConfig.CAConfig.Intermediate.TLS.CertFiles + for index, certFile := range certFiles { + err := c.HandleCertInput(certFile, fmt.Sprintf("parent-certfile%d.pem", index), c.intermediateCrypto) + if err != nil { + return nil, err + } + certFiles[index] = filepath.Join(c.HomeDir, fmt.Sprintf("parent-certfile%d.pem", index)) + } + c.ServerConfig.CAConfig.Intermediate.TLS.CertFiles = certFiles + + certFile := c.ServerConfig.CAConfig.Intermediate.TLS.Client.CertFile + keyFile := c.ServerConfig.CAConfig.Intermediate.TLS.Client.KeyFile + if certFile != "" && keyFile != "" { + log.Info("Client authentication information provided for intermediate CA connection") + err := c.HandleCertInput(certFile, "parent-cert.pem", c.intermediateCrypto) + if err != nil { + return nil, err + } + c.ServerConfig.CAConfig.Intermediate.TLS.Client.CertFile = filepath.Join(c.HomeDir, "parent-cert.pem") + + err = c.HandleKeyInput(keyFile, "parent-key.pem", c.intermediateCrypto) + if err != nil { + return nil, err + } + c.ServerConfig.CAConfig.Intermediate.TLS.Client.KeyFile = filepath.Join(c.HomeDir, "parent-key.pem") + } + + return c.intermediateCrypto, nil +} + +func (c *Config) IntermediateMountPath() { + certFile := c.ServerConfig.CAConfig.Intermediate.TLS.Client.CertFile + keyFile := c.ServerConfig.CAConfig.Intermediate.TLS.Client.KeyFile + + if certFile != "" && keyFile != "" { + c.ServerConfig.CAConfig.Intermediate.TLS.Client.CertFile = filepath.Join(c.MountPath, "parent-cert.pem") + c.ServerConfig.CAConfig.Intermediate.TLS.Client.KeyFile = filepath.Join(c.MountPath, "parent-key.pem") + } + + certFiles := c.ServerConfig.CAConfig.Intermediate.TLS.CertFiles + for index, _ := range certFiles { + certFiles[index] = filepath.Join(c.MountPath, fmt.Sprintf("parent-certfile%d.pem", index)) + } + c.ServerConfig.CAConfig.Intermediate.TLS.CertFiles = certFiles +} diff --git a/pkg/initializer/ca/config/intermediate_test.go b/pkg/initializer/ca/config/intermediate_test.go new file mode 100644 index 00000000..71c82762 --- /dev/null +++ b/pkg/initializer/ca/config/intermediate_test.go @@ -0,0 +1,88 @@ +/* + * Copyright contributors to the Hyperledger 
Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package config_test + +import ( + "os" + "path/filepath" + + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/ca/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca/config" + "github.com/IBM-Blockchain/fabric-operator/pkg/util/pointer" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("Intermediate config", func() { + var ( + cfg *config.Config + homeDir = "interconfigtest" + ) + + BeforeEach(func() { + os.Mkdir(homeDir, 0777) + }) + + AfterEach(func() { + err := os.RemoveAll(homeDir) + Expect(err).NotTo(HaveOccurred()) + }) + + Context("parses intermediate configuration", func() { + BeforeEach(func() { + cfg = &config.Config{ + ServerConfig: &v1.ServerConfig{ + CAConfig: v1.CAConfig{ + Intermediate: v1.IntermediateCA{ + TLS: v1.ClientTLSConfig{ + Enabled: pointer.True(), + CertFiles: []string{certFile}, + Client: v1.KeyCertFiles{ + CertFile: certFile, + KeyFile: keyFile, + }, + }, + }, + }, + }, + HomeDir: homeDir, + } + }) + + It("parses config and returns a map containing all crypto and updated paths to crypto material", func() { + crypto, err := cfg.ParseIntermediateBlock() + Expect(err).NotTo(HaveOccurred()) + + certData, certKeyExists := crypto["parent-cert.pem"] + Expect(certKeyExists).To(Equal(true)) + Expect(certData).NotTo(BeNil()) + Expect(cfg.ServerConfig.CAConfig.Intermediate.TLS.Client.CertFile).To(Equal(filepath.Join(cfg.HomeDir, "parent-cert.pem"))) + + keyData, keyKeyExists := crypto["parent-key.pem"] + Expect(keyKeyExists).To(Equal(true)) + Expect(keyData).NotTo(BeNil()) + Expect(cfg.ServerConfig.CAConfig.Intermediate.TLS.Client.KeyFile).To(Equal(filepath.Join(cfg.HomeDir, "parent-key.pem"))) + + chainData, chainKeyExists := crypto["parent-certfile0.pem"] + Expect(chainKeyExists).To(Equal(true)) + Expect(chainData).NotTo(BeNil()) + Expect(cfg.ServerConfig.CAConfig.Intermediate.TLS.CertFiles[0]).To(Equal(filepath.Join(cfg.HomeDir, "parent-certfile0.pem"))) + }) + }) +}) diff --git a/pkg/initializer/ca/config/operations.go b/pkg/initializer/ca/config/operations.go new file mode 100644 index 00000000..088974e8 --- /dev/null +++ b/pkg/initializer/ca/config/operations.go @@ -0,0 +1,81 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package config + +import ( + "fmt" + "path/filepath" +) + +func (c *Config) ParseOperationsBlock() (map[string][]byte, error) { + if !c.ServerConfig.Operations.TLS.IsEnabled() { + log.Info("TLS disabled for Operations endpoint") + return nil, nil + } + + log.Info("Parsing Operations block") + certFile := c.ServerConfig.Operations.TLS.CertFile + keyFile := c.ServerConfig.Operations.TLS.KeyFile + + // Values for both TLS certfile and keyfile required for Operations configuration. + // TLS key look up is not supported via BCCSP + err := ValidCryptoInput(certFile, keyFile) + if err != nil { + return nil, err + } + + if c.operationsCrypto == nil { + c.operationsCrypto = map[string][]byte{} + } + + err = c.HandleCertInput(certFile, "operations-cert.pem", c.operationsCrypto) + if err != nil { + return nil, err + } + c.ServerConfig.Operations.TLS.CertFile = filepath.Join(c.HomeDir, "operations-cert.pem") + + err = c.HandleKeyInput(keyFile, "operations-key.pem", c.operationsCrypto) + if err != nil { + return nil, err + } + c.ServerConfig.Operations.TLS.KeyFile = filepath.Join(c.HomeDir, "operations-key.pem") + + certFiles := c.ServerConfig.Operations.TLS.ClientCACertFiles + for index, certFile := range certFiles { + err = c.HandleCertInput(certFile, fmt.Sprintf("operations-certfile%d.pem", index), c.operationsCrypto) + if err != nil { + return nil, err + } + certFiles[index] = filepath.Join(c.HomeDir, fmt.Sprintf("operations-certfile%d.pem", index)) + } + c.ServerConfig.Operations.TLS.ClientCACertFiles = certFiles + + return c.operationsCrypto, nil +} + +func (c *Config) OperationsMountPath() { + c.ServerConfig.Operations.TLS.CertFile = filepath.Join(c.MountPath, "operations-cert.pem") + c.ServerConfig.Operations.TLS.KeyFile = filepath.Join(c.MountPath, "operations-key.pem") + + certFiles := c.ServerConfig.Operations.TLS.ClientCACertFiles + for index, _ := range certFiles { + certFiles[index] = filepath.Join(c.MountPath, fmt.Sprintf("operations-certfile%d.pem", index)) + } + c.ServerConfig.Operations.TLS.ClientCACertFiles = certFiles +} diff --git a/pkg/initializer/ca/config/operations_test.go b/pkg/initializer/ca/config/operations_test.go new file mode 100644 index 00000000..d3fe0541 --- /dev/null +++ b/pkg/initializer/ca/config/operations_test.go @@ -0,0 +1,91 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package config_test + +import ( + "os" + "path/filepath" + + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/ca/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca/config" + "github.com/IBM-Blockchain/fabric-operator/pkg/util/pointer" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Operations config", func() { + var ( + cfg *config.Config + homeDir = "operationsconfigtest" + ) + + BeforeEach(func() { + os.Mkdir(homeDir, 0777) + }) + + AfterEach(func() { + err := os.RemoveAll(homeDir) + Expect(err).NotTo(HaveOccurred()) + }) + + Context("parses Operations configuration", func() { + BeforeEach(func() { + cfg = &config.Config{ + ServerConfig: &v1.ServerConfig{ + Operations: v1.Options{ + TLS: v1.TLS{ + Enabled: pointer.True(), + CertFile: certFile, + KeyFile: keyFile, + ClientCACertFiles: []string{certFile}, + }, + }, + }, + HomeDir: homeDir, + } + }) + + It("returns no error and an empty map if TLS disabled", func() { + cfg.ServerConfig.Operations.TLS.Enabled = pointer.False() + crypto, err := cfg.ParseOperationsBlock() + Expect(err).NotTo(HaveOccurred()) + Expect(crypto).To(BeNil()) + }) + + It("parses config and returns a map containing all crypto and updated paths to crypto material", func() { + crypto, err := cfg.ParseOperationsBlock() + Expect(err).NotTo(HaveOccurred()) + + certData, certKeyExists := crypto["operations-cert.pem"] + Expect(certKeyExists).To(Equal(true)) + Expect(certData).NotTo(BeNil()) + Expect(cfg.ServerConfig.Operations.TLS.CertFile).To(Equal(filepath.Join(cfg.HomeDir, "operations-cert.pem"))) + + keyData, keyKeyExists := crypto["operations-key.pem"] + Expect(keyKeyExists).To(Equal(true)) + Expect(keyData).NotTo(BeNil()) + Expect(cfg.ServerConfig.Operations.TLS.KeyFile).To(Equal(filepath.Join(cfg.HomeDir, "operations-key.pem"))) + + chainData, chainKeyExists := crypto["operations-certfile0.pem"] + Expect(chainKeyExists).To(Equal(true)) + Expect(chainData).NotTo(BeNil()) + Expect(cfg.ServerConfig.Operations.TLS.ClientCACertFiles[0]).To(Equal(filepath.Join(cfg.HomeDir, "operations-certfile0.pem"))) + }) + }) +}) diff --git a/pkg/initializer/ca/config/tls.go b/pkg/initializer/ca/config/tls.go new file mode 100644 index 00000000..254b02a9 --- /dev/null +++ b/pkg/initializer/ca/config/tls.go @@ -0,0 +1,83 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package config + +import ( + "fmt" + "path/filepath" +) + +func (c *Config) ParseTLSBlock() (map[string][]byte, error) { + if !c.ServerConfig.TLS.IsEnabled() { + log.Info("TLS disabled for Fabric CA server") + return nil, nil + } + + if c.tlsCrypto == nil { + c.tlsCrypto = map[string][]byte{} + } + + log.Info("Parsing TLS block") + + certFile := c.ServerConfig.TLS.CertFile + keyFile := c.ServerConfig.TLS.KeyFile + + // Values for both TLS certfile and keyfile required for Operations configuration. 
+ // TLS key look up is not supported via BCCSP + err := ValidCryptoInput(certFile, keyFile) + if err != nil { + return nil, err + } + + err = c.HandleCertInput(certFile, "tls-cert.pem", c.tlsCrypto) + if err != nil { + return nil, err + } + c.ServerConfig.TLS.CertFile = filepath.Join(c.HomeDir, "tls-cert.pem") + + err = c.HandleKeyInput(keyFile, "tls-key.pem", c.tlsCrypto) + if err != nil { + return nil, err + } + c.ServerConfig.TLS.KeyFile = filepath.Join(c.HomeDir, "tls-key.pem") + + certFiles := c.ServerConfig.TLS.ClientAuth.CertFiles + for index, certFile := range certFiles { + fileLocation := filepath.Join(c.HomeDir, fmt.Sprintf("tls-certfile%d.pem", index)) + err = c.HandleCertInput(certFile, fmt.Sprintf("tls-certfile%d.pem", index), c.tlsCrypto) + if err != nil { + return nil, err + } + certFiles[index] = fileLocation + } + c.ServerConfig.TLS.ClientAuth.CertFiles = certFiles + + return c.tlsCrypto, nil +} + +func (c *Config) TLSMountPath() { + c.ServerConfig.TLS.CertFile = filepath.Join(c.MountPath, "tls-cert.pem") + c.ServerConfig.TLS.KeyFile = filepath.Join(c.MountPath, "tls-key.pem") + + certFiles := c.ServerConfig.TLS.ClientAuth.CertFiles + for index, _ := range certFiles { + certFiles[index] = filepath.Join(c.MountPath, fmt.Sprintf("tls-certfile%d.pem", index)) + } + c.ServerConfig.TLS.ClientAuth.CertFiles = certFiles +} diff --git a/pkg/initializer/ca/config/tls_test.go b/pkg/initializer/ca/config/tls_test.go new file mode 100644 index 00000000..1587b167 --- /dev/null +++ b/pkg/initializer/ca/config/tls_test.go @@ -0,0 +1,91 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package config_test + +import ( + "os" + "path/filepath" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/ca/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca/config" + "github.com/IBM-Blockchain/fabric-operator/pkg/util/pointer" +) + +var _ = Describe("TLS Config", func() { + const ( + homeDir = "configtest" + ) + + Context("parses TLS configuration", func() { + var cfg *config.Config + + BeforeEach(func() { + cfg = &config.Config{ + ServerConfig: &v1.ServerConfig{ + TLS: v1.ServerTLSConfig{ + Enabled: pointer.True(), + CertFile: certFile, + KeyFile: keyFile, + ClientAuth: v1.ClientAuth{ + CertFiles: []string{"../../../../testdata/tls/tls.crt"}, + }, + }, + }, + HomeDir: homeDir, + } + + os.Mkdir(homeDir, 0777) + }) + + AfterEach(func() { + err := os.RemoveAll(homeDir) + Expect(err).NotTo(HaveOccurred()) + }) + + It("returns no error and an empty map if TLS disabled", func() { + cfg.ServerConfig.TLS.Enabled = pointer.False() + crypto, err := cfg.ParseTLSBlock() + Expect(err).NotTo(HaveOccurred()) + Expect(crypto).To(BeNil()) + }) + + It("parses config and returns a map containing all crypto and updated paths to crypto material", func() { + crypto, err := cfg.ParseTLSBlock() + Expect(err).NotTo(HaveOccurred()) + + certData, certKeyExists := crypto["tls-cert.pem"] + Expect(certKeyExists).To(Equal(true)) + Expect(certData).NotTo(BeNil()) + Expect(cfg.ServerConfig.TLS.CertFile).To(Equal(filepath.Join(cfg.HomeDir, "tls-cert.pem"))) + + keyData, keyKeyExists := crypto["tls-key.pem"] + Expect(keyKeyExists).To(Equal(true)) + Expect(keyData).NotTo(BeNil()) + Expect(cfg.ServerConfig.TLS.KeyFile).To(Equal(filepath.Join(cfg.HomeDir, "tls-key.pem"))) + + clientAuthData, clientAuthCertKeyExists := crypto["tls-certfile0.pem"] + Expect(clientAuthCertKeyExists).To(Equal(true)) + Expect(clientAuthData).NotTo(BeNil()) + Expect(cfg.ServerConfig.TLS.ClientAuth.CertFiles[0]).To(Equal(filepath.Join(cfg.HomeDir, "tls-certfile0.pem"))) + }) + }) +}) diff --git a/pkg/initializer/ca/hsm.go b/pkg/initializer/ca/hsm.go new file mode 100644 index 00000000..82036252 --- /dev/null +++ b/pkg/initializer/ca/hsm.go @@ -0,0 +1,551 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package initializer + +import ( + "context" + "fmt" + "path/filepath" + "strings" + "time" + + "github.com/pkg/errors" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/ca/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + caconfig "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca/config" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + controller "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + "github.com/IBM-Blockchain/fabric-operator/pkg/util/image" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + + k8sclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" +) + +// HSMInitJobTimeouts defines timeouts properties +type HSMInitJobTimeouts struct { + JobStart common.Duration `json:"jobStart" yaml:"jobStart"` + JobCompletion common.Duration `json:"jobCompletion" yaml:"jobCompletion"` +} + +// HSM implements the ability to initialize HSM CA +type HSM struct { + Config *config.HSMConfig + Timeouts HSMInitJobTimeouts + Client controller.Client + Scheme *runtime.Scheme +} + +// Create creates the crypto and config materical to initialize an HSM based CA +func (h *HSM) Create(instance *current.IBPCA, overrides *v1.ServerConfig, ca IBPCA) (*Response, error) { + log.Info(fmt.Sprintf("Creating job to initialize ca '%s'", instance.GetName())) + + if err := ca.OverrideServerConfig(overrides); err != nil { + return nil, err + } + + if err := createCACryptoSecret(h.Client, h.Scheme, instance, ca); err != nil { + return nil, err + } + + if err := createCAConfigMap(h.Client, h.Scheme, instance, h.Config.Library.FilePath, ca); err != nil { + return nil, err + } + + dbConfig, err := getDBConfig(instance, ca.GetType()) + if err != nil { + return nil, errors.Wrapf(err, "failed get DB config for CA '%s'", instance.GetName()) + } + + job := initHSMCAJob(instance, h.Config, dbConfig, ca.GetType()) + setPathsOnJob(h.Config, job) + + if err := h.Client.Create(context.TODO(), job, controller.CreateOption{ + Owner: instance, + Scheme: h.Scheme, + }); err != nil { + return nil, errors.Wrap(err, "failed to create HSM ca initialization job") + } + + log.Info(fmt.Sprintf("Job '%s' created", job.GetName())) + + // Wait for job to start and pod to go into running state + if err := h.waitForJobToBeActive(job); err != nil { + return nil, err + } + + status, err := h.waitForJobPodToFinish(job) + if err != nil { + return nil, err + } + + log.Info(fmt.Sprintf("Job '%s' finished", job.GetName())) + + if status.Phase != corev1.PodSucceeded { + return nil, fmt.Errorf("failed to init '%s' check job '%s' pods for errors", instance.GetName(), job.GetName()) + } + + // For posterity, job is only deleted if successful, not deleting on failure allows logs to be + // available for review. + // + // Don't need to cleanup/delete CACrypto Secret and CAConfig config map created earlier, + // as the job will update these resources. 
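// Illustrative sketch of wiring up the HSM initializer defined above. The
// timeout values and the newHSMInitializer name are hypothetical;
// common.MustParseDuration is used the same way in the unit tests further
// below, and the client and scheme would come from the operator's manager
// setup.
package initializer

import (
	"github.com/IBM-Blockchain/fabric-operator/pkg/apis/common"
	"github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config"
	controller "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient"
	"k8s.io/apimachinery/pkg/runtime"
)

func newHSMInitializer(hsmConfig *config.HSMConfig, client controller.Client, scheme *runtime.Scheme) *HSM {
	return &HSM{
		Config: hsmConfig,
		Client: client,
		Scheme: scheme,
		Timeouts: HSMInitJobTimeouts{
			JobStart:      common.MustParseDuration("90s"), // hypothetical timeout
			JobCompletion: common.MustParseDuration("90s"), // hypothetical timeout
		},
	}
}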
+ if err := h.deleteJob(job); err != nil { + return nil, err + } + + if ca.GetType().Is(caconfig.EnrollmentCA) { + if err := updateCAConfigMap(h.Client, h.Scheme, instance, ca); err != nil { + return nil, errors.Wrapf(err, "failed to update CA configmap for CA %s", instance.GetName()) + } + } + + return nil, nil +} + +func createCACryptoSecret(client controller.Client, scheme *runtime.Scheme, instance *current.IBPCA, ca IBPCA) error { + crypto, err := ca.ParseCrypto() + if err != nil { + return err + } + + var name string + switch ca.GetType() { + case caconfig.EnrollmentCA: + name = fmt.Sprintf("%s-ca-crypto", instance.GetName()) + case caconfig.TLSCA: + name = fmt.Sprintf("%s-tlsca-crypto", instance.GetName()) + } + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: instance.GetNamespace(), + }, + Data: crypto, + } + + if err := client.Create(context.TODO(), secret, controller.CreateOption{ + Owner: instance, + Scheme: scheme, + }); err != nil { + return errors.Wrap(err, "failed to create initialization crypto secret") + } + + return nil +} + +func createCAConfigMap(client controller.Client, scheme *runtime.Scheme, instance *current.IBPCA, library string, ca IBPCA) error { + serverConfig := ca.GetServerConfig() + serverConfig.CAConfig.CSP.PKCS11.Library = filepath.Join("/hsm/lib", filepath.Base(library)) + + ca.SetMountPaths() + configBytes, err := ca.ConfigToBytes() + if err != nil { + return err + } + + var name string + switch ca.GetType() { + case caconfig.EnrollmentCA: + name = fmt.Sprintf("%s-ca-config", instance.GetName()) + case caconfig.TLSCA: + name = fmt.Sprintf("%s-tlsca-config", instance.GetName()) + } + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: instance.GetNamespace(), + Labels: instance.GetLabels(), + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "IBPCA", + APIVersion: "ibp.com/v1beta1", + Name: instance.GetName(), + UID: instance.GetUID(), + }, + }, + }, + BinaryData: map[string][]byte{ + "fabric-ca-server-config.yaml": configBytes, + }, + } + + if err := client.Create(context.TODO(), cm, controller.CreateOption{ + Owner: instance, + Scheme: scheme, + }); err != nil { + return errors.Wrap(err, "failed to create initialization config map secret") + } + + return nil +} + +func updateCAConfigMap(client controller.Client, scheme *runtime.Scheme, instance *current.IBPCA, ca IBPCA) error { + serverConfig := ca.GetServerConfig() + serverConfig.CAfiles = []string{"/data/tlsca/fabric-ca-server-config.yaml"} + + configBytes, err := ca.ConfigToBytes() + if err != nil { + return err + } + + name := fmt.Sprintf("%s-ca-config", instance.GetName()) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: instance.GetNamespace(), + Labels: instance.GetLabels(), + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "IBPCA", + APIVersion: "ibp.com/v1beta1", + Name: instance.GetName(), + UID: instance.GetUID(), + }, + }, + }, + BinaryData: map[string][]byte{ + "fabric-ca-server-config.yaml": configBytes, + }, + } + + if err := client.Update(context.TODO(), cm, controller.UpdateOption{ + Owner: instance, + Scheme: scheme, + }); err != nil { + return errors.Wrapf(err, "failed to update config map '%s'", name) + } + + return nil +} + +func (h *HSM) waitForJobToBeActive(job *batchv1.Job) error { + err := wait.Poll(2*time.Second, h.Timeouts.JobStart.Duration, func() (bool, error) { + log.Info(fmt.Sprintf("Waiting for job '%s' to start", job.GetName())) + + j := 
&batchv1.Job{} + err := h.Client.Get(context.TODO(), types.NamespacedName{ + Name: job.GetName(), + Namespace: job.GetNamespace(), + }, j) + if err != nil { + return false, nil + } + + if j.Status.Active >= int32(1) { + return true, nil + } + + return false, nil + }) + if err != nil { + return errors.Wrap(err, "job failed to start") + } + return nil +} + +func (h *HSM) waitForJobPodToFinish(job *batchv1.Job) (*corev1.PodStatus, error) { + var err error + var status *corev1.PodStatus + + err = wait.Poll(2*time.Second, h.Timeouts.JobCompletion.Duration, func() (bool, error) { + log.Info(fmt.Sprintf("Waiting for job pod '%s' to finish", job.GetName())) + + status, err = h.podStatus(job) + if err != nil { + log.Info(fmt.Sprintf("job pod err: %s", err)) + return false, nil + } + + if status.Phase == corev1.PodFailed || status.Phase == corev1.PodSucceeded { + return true, nil + } + + return false, nil + }) + if err != nil { + return nil, errors.Wrapf(err, "pod for job '%s' failed to finish", job.GetName()) + } + + return status, nil +} + +func (h *HSM) podStatus(job *batchv1.Job) (*corev1.PodStatus, error) { + labelSelector, err := labels.Parse(fmt.Sprintf("job-name=%s", job.GetName())) + if err != nil { + return nil, err + } + + opts := &k8sclient.ListOptions{ + LabelSelector: labelSelector, + } + + pods := &corev1.PodList{} + if err := h.Client.List(context.TODO(), pods, opts); err != nil { + return nil, err + } + + if len(pods.Items) != 1 { + return nil, errors.New("incorrect number of job pods found") + } + + for _, pod := range pods.Items { + for _, containerStatus := range pod.Status.ContainerStatuses { + if containerStatus.State.Waiting != nil || containerStatus.State.Running != nil { + return &pod.Status, nil + } + } + + return &pod.Status, nil + } + + return nil, errors.New("unable to get pod status") +} + +func (h *HSM) deleteJob(job *batchv1.Job) error { + if err := h.Client.Delete(context.TODO(), job); err != nil { + return err + } + + // TODO: Need to investigate why job is not adding controller reference to job pod, + // this manual cleanup should not be required + podList := &corev1.PodList{} + if err := h.Client.List(context.TODO(), podList, k8sclient.MatchingLabels{"job-name": job.Name}); err != nil { + return errors.Wrap(err, "failed to list job pods") + } + + for _, pod := range podList.Items { + podListItem := pod + if err := h.Client.Delete(context.TODO(), &podListItem); err != nil { + return errors.Wrapf(err, "failed to delete pod '%s'", podListItem.Name) + } + } + + return nil +} + +func setPathsOnJob(hsmConfig *config.HSMConfig, job *batchv1.Job) { + job.Spec.Template.Spec.Volumes = append(job.Spec.Template.Spec.Volumes, hsmConfig.GetVolumes()...) + job.Spec.Template.Spec.Containers[0].VolumeMounts = append(job.Spec.Template.Spec.Containers[0].VolumeMounts, hsmConfig.GetVolumeMounts()...) 
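// Illustrative sketch of the polling pattern used by waitForJobToBeActive and
// waitForJobPodToFinish above: wait.Poll re-runs a condition every two seconds
// until it reports true, returns an error, or the configured timeout elapses.
// The condition below is a stand-in; the real helpers fetch the Job or its pod
// and inspect the status.
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	start := time.Now()
	err := wait.Poll(2*time.Second, 10*time.Second, func() (bool, error) {
		// Stand-in condition: report "done" after roughly four seconds.
		return time.Since(start) > 4*time.Second, nil
	})
	fmt.Println("poll finished, err:", err)
}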
+} + +func getDBConfig(instance *current.IBPCA, caType caconfig.Type) (*v1.CAConfigDB, error) { + var rawMessage *[]byte + switch caType { + case caconfig.EnrollmentCA: + if instance.Spec.ConfigOverride != nil && instance.Spec.ConfigOverride.CA != nil { + rawMessage = &instance.Spec.ConfigOverride.CA.Raw + } + case caconfig.TLSCA: + if instance.Spec.ConfigOverride != nil && instance.Spec.ConfigOverride.TLSCA != nil { + rawMessage = &instance.Spec.ConfigOverride.TLSCA.Raw + } + } + + if rawMessage == nil { + return &v1.CAConfigDB{}, nil + } + + caOverrides := &v1.ServerConfig{} + err := yaml.Unmarshal(*rawMessage, caOverrides) + if err != nil { + return nil, err + } + + return caOverrides.CAConfig.DB, nil +} + +func initHSMCAJob(instance *current.IBPCA, hsmConfig *config.HSMConfig, dbConfig *v1.CAConfigDB, caType caconfig.Type) *batchv1.Job { + var typ string + + switch caType { + case caconfig.EnrollmentCA: + typ = "ca" + case caconfig.TLSCA: + typ = "tlsca" + } + + cryptoMountPath := fmt.Sprintf("/crypto/%s", typ) + homeDir := fmt.Sprintf("/tmp/data/%s/%s", instance.GetName(), typ) + secretName := fmt.Sprintf("%s-%s-crypto", instance.GetName(), typ) + jobName := fmt.Sprintf("%s-%s-init", instance.GetName(), typ) + + hsmLibraryPath := hsmConfig.Library.FilePath + hsmLibraryName := filepath.Base(hsmLibraryPath) + + f := false + user := int64(0) + backoffLimit := int32(0) + mountPath := "/shared" + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: jobName, + Namespace: instance.GetNamespace(), + Labels: map[string]string{ + "name": jobName, + "owner": instance.GetName(), + }, + }, + Spec: batchv1.JobSpec{ + BackoffLimit: &backoffLimit, + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + ServiceAccountName: instance.GetName(), + ImagePullSecrets: util.AppendImagePullSecretIfMissing(instance.GetPullSecrets(), hsmConfig.BuildPullSecret()), + RestartPolicy: corev1.RestartPolicyNever, + InitContainers: []corev1.Container{ + corev1.Container{ + Name: "hsm-client", + Image: hsmConfig.Library.Image, + ImagePullPolicy: corev1.PullAlways, + Command: []string{ + "sh", + "-c", + fmt.Sprintf("mkdir -p %s/hsm && dst=\"%s/hsm/%s\" && echo \"Copying %s to ${dst}\" && mkdir -p $(dirname $dst) && cp -r %s $dst", mountPath, mountPath, hsmLibraryName, hsmLibraryPath, hsmLibraryPath), + }, + SecurityContext: &corev1.SecurityContext{ + RunAsUser: &user, + RunAsNonRoot: &f, + }, + VolumeMounts: []corev1.VolumeMount{ + corev1.VolumeMount{ + Name: "shared", + MountPath: mountPath, + }, + }, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("0.1"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + corev1.ResourceEphemeralStorage: resource.MustParse("100Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("500Mi"), + corev1.ResourceEphemeralStorage: resource.MustParse("1Gi"), + }, + }, + }, + }, + Containers: []corev1.Container{ + corev1.Container{ + Name: "init", + Image: image.Format( + instance.Spec.Images.EnrollerImage, + instance.Spec.Images.EnrollerTag, + ), + ImagePullPolicy: corev1.PullAlways, + SecurityContext: &corev1.SecurityContext{ + RunAsUser: &user, + RunAsNonRoot: &f, + }, + Command: []string{ + "sh", + "-c", + fmt.Sprintf("/usr/local/bin/enroller ca %s %s %s %s %s %s", instance.GetName(), instance.GetNamespace(), homeDir, cryptoMountPath, secretName, caType), + }, + Env: hsmConfig.GetEnvs(), + VolumeMounts: []corev1.VolumeMount{ + 
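+						// The enroller container ("init") mounts the PKCS#11 library copied
+						// by the hsm-client init container via the shared in-memory emptyDir
+						// (exposed here at /hsm/lib), plus the rendered
+						// fabric-ca-server-config.yaml from the <instance>-<ca|tlsca>-config
+						// ConfigMap.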
corev1.VolumeMount{ + Name: "shared", + MountPath: "/hsm/lib", + SubPath: "hsm", + }, + corev1.VolumeMount{ + Name: "caconfig", + MountPath: fmt.Sprintf("/tmp/data/%s/%s/fabric-ca-server-config.yaml", instance.GetName(), typ), + SubPath: "fabric-ca-server-config.yaml", + }, + }, + }, + }, + Volumes: []corev1.Volume{ + corev1.Volume{ + Name: "shared", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{ + Medium: corev1.StorageMediumMemory, + }, + }, + }, + corev1.Volume{ + Name: "caconfig", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: fmt.Sprintf("%s-%s-config", instance.GetName(), typ), + }, + }, + }, + }, + }, + }, + }, + }, + } + + if dbConfig == nil { + return job + } + + // If using postgres with TLS enabled need to mount trusted root TLS certificate for databae server + if strings.ToLower(dbConfig.Type) == "postgres" { + if dbConfig.TLS.IsEnabled() { + job.Spec.Template.Spec.Containers[0].VolumeMounts = append(job.Spec.Template.Spec.Containers[0].VolumeMounts, + corev1.VolumeMount{ + Name: "cacrypto", + MountPath: fmt.Sprintf("/crypto/%s/db-certfile0.pem", typ), + SubPath: "db-certfile0.pem", + }) + + job.Spec.Template.Spec.Volumes = append(job.Spec.Template.Spec.Volumes, + corev1.Volume{ + Name: "cacrypto", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: fmt.Sprintf("%s-%s-crypto", instance.GetName(), typ), + Items: []corev1.KeyToPath{ + corev1.KeyToPath{ + Key: "db-certfile0.pem", + Path: "db-certfile0.pem", + }, + }, + }, + }, + }, + ) + } + } + + return job +} diff --git a/pkg/initializer/ca/hsm_test.go b/pkg/initializer/ca/hsm_test.go new file mode 100644 index 00000000..ec785d04 --- /dev/null +++ b/pkg/initializer/ca/hsm_test.go @@ -0,0 +1,322 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package initializer_test + +import ( + "context" + "fmt" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/pkg/errors" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/ca/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + initializer "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca" + caconfig "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca/config" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca/mocks" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + k8sclient "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = Describe("HSM CA initializer", func() { + var ( + client *mocks.Client + ca *mocks.IBPCA + // defaultConfig *mocks.CAConfig + + hsmca *initializer.HSM + instance *current.IBPCA + ) + + BeforeEach(func() { + client = &mocks.Client{ + GetStub: func(ctx context.Context, nn types.NamespacedName, obj k8sclient.Object) error { + switch obj.(type) { + case *batchv1.Job: + j := obj.(*batchv1.Job) + j.Status.Active = int32(1) + j.Name = "test-job" + } + return nil + }, + ListStub: func(ctx context.Context, obj k8sclient.ObjectList, opts ...k8sclient.ListOption) error { + switch obj.(type) { + case *corev1.PodList: + pods := obj.(*corev1.PodList) + pods.Items = []corev1.Pod{ + { + Status: corev1.PodStatus{ + Phase: corev1.PodSucceeded, + ContainerStatuses: []corev1.ContainerStatus{ + { + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{}, + }, + }, + }, + }, + }, + } + } + return nil + }, + } + + hsmConfig := &config.HSMConfig{ + Type: "hsm", + Version: "v1", + Library: config.Library{ + FilePath: "/usr/lib/libCryptoki2_64.so", + Image: "ghcr.io/ibm-blockchain/gemalto-client:skarim-amd64", + Auth: &config.Auth{ + ImagePullSecret: "hsmpullsecret", + }, + }, + Envs: []corev1.EnvVar{ + { + Name: "DUMMY_ENV_NAME", + Value: "DUMMY_ENV_VALUE", + }, + }, + MountPaths: []config.MountPath{ + { + Name: "hsmcrypto", + Secret: "hsmcrypto", + MountPath: "/hsm", + Paths: []config.Path{ + { + Key: "cafile.pem", + Path: "cafile.pem", + }, + }, + }, + { + Name: "hsmconfig", + Secret: "hsmcrypto", + MountPath: "/etc/Chrystoki.conf", + SubPath: "Chrystoki.conf", + }, + }, + } + + ca = &mocks.IBPCA{} + ca.GetServerConfigReturns(&v1.ServerConfig{ + CAConfig: v1.CAConfig{ + CSP: &v1.BCCSP{ + PKCS11: &v1.PKCS11Opts{}, + }, + }, + }) + ca.GetTypeReturns(caconfig.EnrollmentCA) + + instance = ¤t.IBPCA{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ibpca", + }, + Spec: current.IBPCASpec{ + Resources: ¤t.CAResources{ + CA: &corev1.ResourceRequirements{}, + Init: &corev1.ResourceRequirements{}, + }, + Images: ¤t.CAImages{}, + }, + } + + hsmca = &initializer.HSM{ + Config: hsmConfig, + Timeouts: initializer.HSMInitJobTimeouts{ + JobStart: common.MustParseDuration("1s"), + JobCompletion: common.MustParseDuration("1s"), + }, + Client: client, + } + }) + + Context("creates", func() { + It("returns error if overriding server config fails", func() { + ca.OverrideServerConfigReturns(errors.New("override failed")) + _, err := hsmca.Create(instance, &v1.ServerConfig{}, ca) + Expect(err).To(MatchError("override failed")) + }) + + It("returns error if creating ca crypto secret fails", func() { + client.CreateReturnsOnCall(0, errors.New("failed to create crypto secret")) + _, err := hsmca.Create(instance, &v1.ServerConfig{}, ca) + 
Expect(err).To(MatchError(ContainSubstring("failed to create crypto secret"))) + }) + + It("returns error if creating ca config map fails", func() { + client.CreateReturnsOnCall(1, errors.New("failed to create config map")) + _, err := hsmca.Create(instance, &v1.ServerConfig{}, ca) + Expect(err).To(MatchError(ContainSubstring("failed to create config map"))) + }) + + It("returns error if creating job fails", func() { + client.CreateReturnsOnCall(2, errors.New("failed to create job")) + _, err := hsmca.Create(instance, &v1.ServerConfig{}, ca) + Expect(err).To(MatchError(ContainSubstring("failed to create job"))) + }) + + Context("job start timeout", func() { + BeforeEach(func() { + client.GetStub = func(ctx context.Context, nn types.NamespacedName, obj k8sclient.Object) error { + switch obj.(type) { + case *batchv1.Job: + j := obj.(*batchv1.Job) + j.Status.Active = int32(0) + j.Name = "test-job" + + } + return nil + } + }) + + It("returns error if job doesn't start before timeout", func() { + _, err := hsmca.Create(instance, &v1.ServerConfig{}, ca) + Expect(err).To(MatchError(ContainSubstring("job failed to start"))) + }) + }) + + Context("job fails", func() { + When("job timesout", func() { + BeforeEach(func() { + client.ListStub = func(ctx context.Context, obj k8sclient.ObjectList, opts ...k8sclient.ListOption) error { + switch obj.(type) { + case *corev1.PodList: + p := obj.(*corev1.PodList) + p.Items = []corev1.Pod{} + } + return nil + } + }) + + It("returns error", func() { + _, err := hsmca.Create(instance, &v1.ServerConfig{}, ca) + Expect(err).To(MatchError(ContainSubstring("failed to finish"))) + }) + }) + + When("pod enters failed state", func() { + BeforeEach(func() { + client.ListStub = func(ctx context.Context, obj k8sclient.ObjectList, opts ...k8sclient.ListOption) error { + switch obj.(type) { + case *corev1.PodList: + p := obj.(*corev1.PodList) + p.Items = []corev1.Pod{{ + Status: corev1.PodStatus{ + Phase: corev1.PodFailed, + }, + }} + } + return nil + } + }) + + It("returns error", func() { + _, err := hsmca.Create(instance, &v1.ServerConfig{}, ca) + Expect(err).To(MatchError(ContainSubstring(fmt.Sprintf("check job '%s' pods for errors", instance.GetName()+"-ca-init")))) + }) + }) + }) + + It("returns error if unable to delete job after success", func() { + client.DeleteReturns(errors.New("failed to delete job")) + _, err := hsmca.Create(instance, &v1.ServerConfig{}, ca) + Expect(err).To(MatchError(ContainSubstring("failed to delete job"))) + }) + + It("returns error if unable to update ca config map", func() { + client.UpdateReturns(errors.New("failed to update ca config map")) + _, err := hsmca.Create(instance, &v1.ServerConfig{}, ca) + Expect(err).To(MatchError(ContainSubstring("failed to update ca config map"))) + }) + + It("returns sucessfully with no error and nil response", func() { + _, err := hsmca.Create(instance, &v1.ServerConfig{}, ca) + Expect(err).NotTo(HaveOccurred()) + + By("creating a job resource", func() { + _, obj, _ := client.CreateArgsForCall(2) + Expect(obj).NotTo(BeNil()) + + job := obj.(*batchv1.Job) + Expect(job.Spec.Template.Spec.Containers[0].Env).To(ContainElements( + corev1.EnvVar{ + Name: "DUMMY_ENV_NAME", + Value: "DUMMY_ENV_VALUE", + }, + )) + + Expect(job.Spec.Template.Spec.Containers[0].VolumeMounts).To(ContainElements([]corev1.VolumeMount{ + { + Name: "hsmcrypto", + MountPath: "/hsm", + }, + { + Name: "hsmconfig", + MountPath: "/etc/Chrystoki.conf", + SubPath: "Chrystoki.conf", + }, + })) + + 
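+				// The hsmcrypto and hsmconfig volumes asserted below come from the
+				// operator-level HSMConfig MountPaths set up in BeforeEach; they are
+				// appended to the job by setPathsOnJob rather than taken from the
+				// IBPCA spec.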
Expect(job.Spec.Template.Spec.Volumes).To(ContainElements([]corev1.Volume{ + { + Name: "hsmconfig", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "hsmcrypto", + }, + }, + }, + { + Name: "hsmcrypto", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "hsmcrypto", + Items: []corev1.KeyToPath{ + { + Key: "cafile.pem", + Path: "cafile.pem", + }, + }, + }, + }, + }, + })) + }) + + By("deleting completed job", func() { + // One delete count to delete job and second delete count to delete associated pod + Expect(client.DeleteCallCount()).To(Equal(2)) + }) + + By("updating config map if enrollment CA", func() { + Expect(client.UpdateCallCount()).To(Equal(1)) + }) + }) + }) +}) diff --git a/pkg/initializer/ca/hsmdaemon.go b/pkg/initializer/ca/hsmdaemon.go new file mode 100644 index 00000000..d3d33e06 --- /dev/null +++ b/pkg/initializer/ca/hsmdaemon.go @@ -0,0 +1,325 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package initializer + +import ( + "context" + "fmt" + "path/filepath" + "strings" + + "github.com/pkg/errors" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/ca/v1" + caconfig "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca/config" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + controller "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + jobv1 "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/job" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + "github.com/IBM-Blockchain/fabric-operator/pkg/util/image" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// HSMDaemon implements the ability to initialize HSM Daemon based CA +type HSMDaemon struct { + Config *config.HSMConfig + Scheme *runtime.Scheme + Timeouts HSMInitJobTimeouts + Client controller.Client +} + +// Create creates the crypto and config material to initialize an HSM based CA +func (h *HSMDaemon) Create(instance *current.IBPCA, overrides *v1.ServerConfig, ca IBPCA) (*Response, error) { + log.Info(fmt.Sprintf("Creating job to initialize ca '%s'", instance.GetName())) + + if err := ca.OverrideServerConfig(overrides); err != nil { + return nil, err + } + + if err := createCACryptoSecret(h.Client, h.Scheme, instance, ca); err != nil { + return nil, err + } + + if err := createCAConfigMap(h.Client, h.Scheme, instance, h.Config.Library.FilePath, ca); err != nil { + return nil, err + } + + dbConfig, err := getDBConfig(instance, ca.GetType()) + if err != nil { + return nil, errors.Wrapf(err, "failed to get DB config for CA '%s'", instance.GetName()) + } + + job := h.initHSMCAJob(instance, dbConfig, ca.GetType()) + if err := h.Client.Create(context.TODO(), 
job.Job, controller.CreateOption{ + Owner: instance, + Scheme: h.Scheme, + }); err != nil { + return nil, errors.Wrap(err, "failed to create HSM ca initialization job") + } + log.Info(fmt.Sprintf("Job '%s' created", job.GetName())) + + if err := job.WaitUntilActive(h.Client); err != nil { + return nil, err + } + log.Info(fmt.Sprintf("Job '%s' active", job.GetName())) + + if err := job.WaitUntilContainerFinished(h.Client, CertGen); err != nil { + return nil, err + } + log.Info(fmt.Sprintf("Job '%s' finished", job.GetName())) + + status, err := job.ContainerStatus(h.Client, CertGen) + if err != nil { + return nil, err + } + + switch status { + case jobv1.FAILED: + return nil, fmt.Errorf("Job '%s' finished unsuccessfully, not cleaning up pods to allow for error evaluation", job.GetName()) + case jobv1.COMPLETED: + // For posterity, job is only deleted if successful, not deleting on failure allows logs to be + // examined. + if err := job.Delete(h.Client); err != nil { + return nil, err + } + } + + if ca.GetType().Is(caconfig.EnrollmentCA) { + if err := updateCAConfigMap(h.Client, h.Scheme, instance, ca); err != nil { + return nil, errors.Wrapf(err, "failed to update CA configmap for CA %s", instance.GetName()) + } + } + + return nil, nil +} + +const ( + // HSMClient is the name of container that contain the HSM client library + HSMClient = "hsm-client" + // CertGen is the name of container that runs the command to generate the certificate for the CA + CertGen = "certgen" +) + +func (h *HSMDaemon) initHSMCAJob(instance *current.IBPCA, dbConfig *v1.CAConfigDB, caType caconfig.Type) *jobv1.Job { + var typ string + + switch caType { + case caconfig.EnrollmentCA: + typ = "ca" + case caconfig.TLSCA: + typ = "tlsca" + } + + cryptoMountPath := fmt.Sprintf("/crypto/%s", typ) + homeDir := fmt.Sprintf("/tmp/data/%s/%s", instance.GetName(), typ) + secretName := fmt.Sprintf("%s-%s-crypto", instance.GetName(), typ) + + jobName := fmt.Sprintf("%s-%s-init", instance.GetName(), typ) + + hsmLibraryPath := h.Config.Library.FilePath + hsmLibraryName := filepath.Base(hsmLibraryPath) + + t := true + user := int64(1000) + root := int64(0) + backoffLimit := int32(0) + mountPath := "/shared" + pvcVolumeName := "fabric-ca" + + batchJob := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: jobName, + Namespace: instance.GetNamespace(), + Labels: map[string]string{ + "name": jobName, + "owner": instance.GetName(), + }, + }, + Spec: batchv1.JobSpec{ + BackoffLimit: &backoffLimit, + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + ServiceAccountName: instance.GetName(), + ImagePullSecrets: util.AppendImagePullSecretIfMissing(instance.GetPullSecrets(), h.Config.BuildPullSecret()), + RestartPolicy: corev1.RestartPolicyNever, + InitContainers: []corev1.Container{ + { + Name: HSMClient, + Image: h.Config.Library.Image, + ImagePullPolicy: corev1.PullAlways, + Command: []string{ + "sh", + "-c", + fmt.Sprintf("mkdir -p %s/hsm && dst=\"%s/hsm/%s\" && echo \"Copying %s to ${dst}\" && mkdir -p $(dirname $dst) && cp -r %s $dst", mountPath, mountPath, hsmLibraryName, hsmLibraryPath, hsmLibraryPath), + }, + SecurityContext: &corev1.SecurityContext{ + RunAsUser: &user, + RunAsNonRoot: &t, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "shared", + MountPath: mountPath, + }, + }, + Resources: instance.GetResource("init"), + }, + }, + Containers: []corev1.Container{ + { + Name: CertGen, + Image: image.Format( + instance.Spec.Images.EnrollerImage, + instance.Spec.Images.EnrollerTag, + ), + ImagePullPolicy: 
corev1.PullAlways, + SecurityContext: &corev1.SecurityContext{ + RunAsUser: &root, + Privileged: &t, + AllowPrivilegeEscalation: &t, + }, + Command: []string{ + "sh", + "-c", + }, + Args: []string{ + fmt.Sprintf(config.DAEMON_CHECK_CMD+" && /usr/local/bin/enroller ca %s %s %s %s %s %s", instance.GetName(), instance.GetNamespace(), homeDir, cryptoMountPath, secretName, caType), + }, + Env: h.Config.GetEnvs(), + Resources: instance.GetResource(current.ENROLLER), + VolumeMounts: []corev1.VolumeMount{ + { + Name: "shared", + MountPath: "/hsm/lib", + SubPath: "hsm", + }, + { + Name: "shared", + MountPath: "/shared", + }, + { + Name: "caconfig", + MountPath: fmt.Sprintf("/tmp/data/%s/%s/fabric-ca-server-config.yaml", instance.GetName(), typ), + SubPath: "fabric-ca-server-config.yaml", + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "shared", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{ + Medium: corev1.StorageMediumMemory, + }, + }, + }, + { + Name: "caconfig", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: fmt.Sprintf("%s-%s-config", instance.GetName(), typ), + }, + }, + }, + }, + { + Name: pvcVolumeName, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: instance.PVCName(), + }, + }, + }, + }, + }, + }, + }, + } + job := jobv1.New(batchJob, &jobv1.Timeouts{ + WaitUntilActive: h.Timeouts.JobStart.Get(), + WaitUntilFinished: h.Timeouts.JobCompletion.Get(), + }) + + job.Spec.Template.Spec.Volumes = append(job.Spec.Template.Spec.Volumes, h.Config.GetVolumes()...) + job.Spec.Template.Spec.Containers[0].VolumeMounts = append(job.Spec.Template.Spec.Containers[0].VolumeMounts, h.Config.GetVolumeMounts()...) + + if dbConfig != nil { + // If using postgres with TLS enabled need to mount trusted root TLS certificate for database server + if strings.ToLower(dbConfig.Type) == "postgres" { + if dbConfig.TLS.IsEnabled() { + job.Spec.Template.Spec.Containers[0].VolumeMounts = append(job.Spec.Template.Spec.Containers[0].VolumeMounts, + corev1.VolumeMount{ + Name: "cacrypto", + MountPath: fmt.Sprintf("/crypto/%s/db-certfile0.pem", typ), + SubPath: "db-certfile0.pem", + }) + + job.Spec.Template.Spec.Volumes = append(job.Spec.Template.Spec.Volumes, + corev1.Volume{ + Name: "cacrypto", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: fmt.Sprintf("%s-%s-crypto", instance.GetName(), typ), + Items: []corev1.KeyToPath{ + corev1.KeyToPath{ + Key: "db-certfile0.pem", + Path: "db-certfile0.pem", + }, + }, + }, + }, + }, + ) + } + } + } + + // If daemon settings are configured in HSM config, create a sidecar that is running the daemon image + if h.Config.Daemon != nil { + // Certain token information requires to be stored in persistent store, the administrator + // responsible for configuring HSM sets the HSM config to point to the path where the PVC + // needs to be mounted. 
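+ // For illustration only -- a hypothetical mount path entry (names and values are not taken
+ // from a real deployment) showing how an administrator could request PVC-backed storage for
+ // token data via the HSM config:
+ //
+ //   config.MountPath{
+ //       Name:      "tokendata",        // hypothetical volume name
+ //       MountPath: "/var/hsm/tokens",  // hypothetical path expected by the HSM client
+ //       UsePVC:    true,               // have the initializer mount the CA's PVC at this path
+ //   }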
+ var pvcMount *corev1.VolumeMount + for _, vm := range h.Config.MountPaths { + if vm.UsePVC { + pvcMount = &corev1.VolumeMount{ + Name: pvcVolumeName, + MountPath: vm.MountPath, + } + } + } + + // Add daemon container to the deployment + config.AddDaemonContainer(h.Config, job, instance.GetResource(current.HSMDAEMON), pvcMount) + + // If a pvc mount has been configured in HSM config, set the volume mount on the CertGen container + if pvcMount != nil { + job.Spec.Template.Spec.Containers[0].VolumeMounts = append(job.Spec.Template.Spec.Containers[0].VolumeMounts, *pvcMount) + } + } + + return job +} diff --git a/pkg/initializer/ca/initializer.go b/pkg/initializer/ca/initializer.go new file mode 100644 index 00000000..d2949f7b --- /dev/null +++ b/pkg/initializer/ca/initializer.go @@ -0,0 +1,137 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package initializer + +import ( + "github.com/hyperledger/fabric-ca/lib" + "k8s.io/apimachinery/pkg/runtime" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/ca/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca/config" + commonconfig "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var log = logf.Log.WithName("ca_initializer") + +type Config struct { + SharedPath string `json:"sharedPath"` + CADefaultConfigPath string `json:"cadefaultconfigpath"` + TLSCADefaultConfigPath string `json:"tlscadefaultconfigpath"` + CAOverrideConfigPath string `json:"caoverrideconfigpath"` + TLSCAOverrideConfigPath string `json:"tlscaoverrideconfigpath"` + DeploymentFile string + PVCFile string + ServiceFile string + RoleFile string + ServiceAccountFile string + RoleBindingFile string + ConfigMapFile string + IngressFile string + Ingressv1beta1File string + RouteFile string +} + +type ConfigOptions struct { + DefaultPath string `json:"defaultpath"` + OverridePath string `json:"overridepath"` +} + +type Response struct { + Config *v1.ServerConfig + CryptoMap map[string][]byte +} + +//go:generate counterfeiter -o mocks/ibpca.go -fake-name IBPCA . 
IBPCA + +type IBPCA interface { + OverrideServerConfig(newConfig *v1.ServerConfig) (err error) + ViperUnmarshal(configFile string) (*lib.ServerConfig, error) + ParseCrypto() (map[string][]byte, error) + ParseCABlock() (map[string][]byte, error) + GetServerConfig() *v1.ServerConfig + WriteConfig() (err error) + RemoveHomeDir() error + IsBeingUpdated() + ConfigToBytes() ([]byte, error) + GetHomeDir() string + Init() (err error) + SetMountPaths() + GetType() config.Type +} + +type Initializer struct { + Timeouts HSMInitJobTimeouts + Client k8sclient.Client + Scheme *runtime.Scheme +} + +func (i *Initializer) Create(instance *current.IBPCA, overrides *v1.ServerConfig, ca IBPCA) (*Response, error) { + type Create interface { + Create(instance *current.IBPCA, overrides *v1.ServerConfig, ca IBPCA) (*Response, error) + } + + var initializer Create + if instance.IsHSMEnabledForType(ca.GetType()) { + if instance.UsingHSMProxy() { + // If Using HSM Proxy, currently sticking with old way of initialization which is within the operator process + // and not a kuberenetes job + initializer = &SW{} + } else { + hsmConfig, err := commonconfig.ReadHSMConfig(i.Client, instance) + if err != nil { + return nil, err + } + + if hsmConfig.Daemon != nil { + initializer = &HSMDaemon{Client: i.Client, Timeouts: i.Timeouts, Config: hsmConfig} + } else { + initializer = &HSM{Client: i.Client, Timeouts: i.Timeouts, Config: hsmConfig} + } + } + } else { + initializer = &SW{} + } + + return initializer.Create(instance, overrides, ca) +} + +func (i *Initializer) Update(instance *current.IBPCA, overrides *v1.ServerConfig, ca IBPCA) (*Response, error) { + ca.IsBeingUpdated() + + err := ca.OverrideServerConfig(overrides) + if err != nil { + return nil, err + } + + crypto, err := ca.ParseCrypto() + if err != nil { + return nil, err + } + + ca.SetMountPaths() + + return &Response{ + Config: ca.GetServerConfig(), + CryptoMap: crypto, + }, nil +} diff --git a/pkg/initializer/ca/initializer_test.go b/pkg/initializer/ca/initializer_test.go new file mode 100644 index 00000000..ad295334 --- /dev/null +++ b/pkg/initializer/ca/initializer_test.go @@ -0,0 +1,120 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package initializer_test + +import ( + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/ca/v1" + initializer "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca/mocks" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/pkg/errors" +) + +var _ = Describe("Initializing the CA before start up", func() { + var ( + init *initializer.Initializer + ca *mocks.IBPCA + ) + + BeforeEach(func() { + ca = &mocks.IBPCA{} + init = &initializer.Initializer{} + }) + + Context("create", func() { + It("returns an error if unable to override server config", func() { + msg := "failed to override" + ca.OverrideServerConfigReturns(errors.New(msg)) + _, err := init.Create(nil, &v1.ServerConfig{}, ca) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(msg)) + }) + + It("returns an error if unable to write config", func() { + msg := "failed to write config" + ca.ParseCryptoReturns(nil, errors.New(msg)) + _, err := init.Create(nil, &v1.ServerConfig{}, ca) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(msg)) + }) + + It("returns an error if unable to write config", func() { + msg := "failed to parse crypto" + ca.WriteConfigReturns(errors.New(msg)) + _, err := init.Create(nil, &v1.ServerConfig{}, ca) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(msg)) + }) + + It("returns an error if unable to init", func() { + msg := "failed to init" + ca.InitReturns(errors.New(msg)) + _, err := init.Create(nil, &v1.ServerConfig{}, ca) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(msg)) + }) + + It("returns an error if unable to parse ca block", func() { + msg := "failed to parse ca block" + ca.ParseCABlockReturns(nil, errors.New(msg)) + _, err := init.Create(nil, &v1.ServerConfig{}, ca) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(msg)) + }) + + It("returns an error if unable to remove home directory", func() { + msg := "failed to remove home directory" + ca.RemoveHomeDirReturns(errors.New(msg)) + _, err := init.Create(nil, &v1.ServerConfig{}, ca) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(msg)) + }) + + It("returns a response containing server config and map contains all crypto material", func() { + result, err := init.Create(nil, &v1.ServerConfig{}, ca) + Expect(err).NotTo(HaveOccurred()) + Expect(result).NotTo(Equal(nil)) + }) + }) + + Context("update", func() { + It("returns an error if unable to override server config", func() { + msg := "failed to override" + ca.OverrideServerConfigReturns(errors.New(msg)) + _, err := init.Update(nil, &v1.ServerConfig{}, ca) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(msg)) + }) + + It("returns an error if unable to parse crypto", func() { + msg := "failed to parse crypto" + ca.ParseCryptoReturns(nil, errors.New(msg)) + _, err := init.Update(nil, &v1.ServerConfig{}, ca) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(msg)) + }) + + It("returns a response containing server config and map contains all crypto material", func() { + result, err := init.Update(nil, &v1.ServerConfig{}, ca) + Expect(err).NotTo(HaveOccurred()) + Expect(result).NotTo(Equal(nil)) + }) + }) +}) diff --git a/pkg/initializer/ca/mocks/client.go b/pkg/initializer/ca/mocks/client.go new file mode 100644 index 00000000..ee14505d --- /dev/null +++ b/pkg/initializer/ca/mocks/client.go @@ -0,0 +1,746 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package mocks + +import ( + "context" + "sync" + + "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type Client struct { + CreateStub func(context.Context, client.Object, ...controllerclient.CreateOption) error + createMutex sync.RWMutex + createArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 []controllerclient.CreateOption + } + createReturns struct { + result1 error + } + createReturnsOnCall map[int]struct { + result1 error + } + CreateOrUpdateStub func(context.Context, client.Object, ...controllerclient.CreateOrUpdateOption) error + createOrUpdateMutex sync.RWMutex + createOrUpdateArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 []controllerclient.CreateOrUpdateOption + } + createOrUpdateReturns struct { + result1 error + } + createOrUpdateReturnsOnCall map[int]struct { + result1 error + } + DeleteStub func(context.Context, client.Object, ...client.DeleteOption) error + deleteMutex sync.RWMutex + deleteArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 []client.DeleteOption + } + deleteReturns struct { + result1 error + } + deleteReturnsOnCall map[int]struct { + result1 error + } + GetStub func(context.Context, types.NamespacedName, client.Object) error + getMutex sync.RWMutex + getArgsForCall []struct { + arg1 context.Context + arg2 types.NamespacedName + arg3 client.Object + } + getReturns struct { + result1 error + } + getReturnsOnCall map[int]struct { + result1 error + } + ListStub func(context.Context, client.ObjectList, ...client.ListOption) error + listMutex sync.RWMutex + listArgsForCall []struct { + arg1 context.Context + arg2 client.ObjectList + arg3 []client.ListOption + } + listReturns struct { + result1 error + } + listReturnsOnCall map[int]struct { + result1 error + } + PatchStub func(context.Context, client.Object, client.Patch, ...controllerclient.PatchOption) error + patchMutex sync.RWMutex + patchArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 client.Patch + arg4 []controllerclient.PatchOption + } + patchReturns struct { + result1 error + } + patchReturnsOnCall map[int]struct { + result1 error + } + PatchStatusStub func(context.Context, client.Object, client.Patch, ...controllerclient.PatchOption) error + patchStatusMutex sync.RWMutex + patchStatusArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 client.Patch + arg4 []controllerclient.PatchOption + } + patchStatusReturns struct { + result1 error + } + patchStatusReturnsOnCall map[int]struct { + result1 error + } + UpdateStub func(context.Context, client.Object, ...controllerclient.UpdateOption) error + updateMutex sync.RWMutex + updateArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 []controllerclient.UpdateOption + } + updateReturns struct { + result1 error + } + updateReturnsOnCall map[int]struct { + result1 error + } + UpdateStatusStub func(context.Context, client.Object, ...client.UpdateOption) error + updateStatusMutex sync.RWMutex + updateStatusArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 []client.UpdateOption + } + updateStatusReturns struct { + result1 error + } + updateStatusReturnsOnCall map[int]struct { + result1 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *Client) Create(arg1 context.Context, arg2 client.Object, arg3 ...controllerclient.CreateOption) error { + 
fake.createMutex.Lock() + ret, specificReturn := fake.createReturnsOnCall[len(fake.createArgsForCall)] + fake.createArgsForCall = append(fake.createArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 []controllerclient.CreateOption + }{arg1, arg2, arg3}) + stub := fake.CreateStub + fakeReturns := fake.createReturns + fake.recordInvocation("Create", []interface{}{arg1, arg2, arg3}) + fake.createMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) CreateCallCount() int { + fake.createMutex.RLock() + defer fake.createMutex.RUnlock() + return len(fake.createArgsForCall) +} + +func (fake *Client) CreateCalls(stub func(context.Context, client.Object, ...controllerclient.CreateOption) error) { + fake.createMutex.Lock() + defer fake.createMutex.Unlock() + fake.CreateStub = stub +} + +func (fake *Client) CreateArgsForCall(i int) (context.Context, client.Object, []controllerclient.CreateOption) { + fake.createMutex.RLock() + defer fake.createMutex.RUnlock() + argsForCall := fake.createArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) CreateReturns(result1 error) { + fake.createMutex.Lock() + defer fake.createMutex.Unlock() + fake.CreateStub = nil + fake.createReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) CreateReturnsOnCall(i int, result1 error) { + fake.createMutex.Lock() + defer fake.createMutex.Unlock() + fake.CreateStub = nil + if fake.createReturnsOnCall == nil { + fake.createReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.createReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) CreateOrUpdate(arg1 context.Context, arg2 client.Object, arg3 ...controllerclient.CreateOrUpdateOption) error { + fake.createOrUpdateMutex.Lock() + ret, specificReturn := fake.createOrUpdateReturnsOnCall[len(fake.createOrUpdateArgsForCall)] + fake.createOrUpdateArgsForCall = append(fake.createOrUpdateArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 []controllerclient.CreateOrUpdateOption + }{arg1, arg2, arg3}) + stub := fake.CreateOrUpdateStub + fakeReturns := fake.createOrUpdateReturns + fake.recordInvocation("CreateOrUpdate", []interface{}{arg1, arg2, arg3}) + fake.createOrUpdateMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) CreateOrUpdateCallCount() int { + fake.createOrUpdateMutex.RLock() + defer fake.createOrUpdateMutex.RUnlock() + return len(fake.createOrUpdateArgsForCall) +} + +func (fake *Client) CreateOrUpdateCalls(stub func(context.Context, client.Object, ...controllerclient.CreateOrUpdateOption) error) { + fake.createOrUpdateMutex.Lock() + defer fake.createOrUpdateMutex.Unlock() + fake.CreateOrUpdateStub = stub +} + +func (fake *Client) CreateOrUpdateArgsForCall(i int) (context.Context, client.Object, []controllerclient.CreateOrUpdateOption) { + fake.createOrUpdateMutex.RLock() + defer fake.createOrUpdateMutex.RUnlock() + argsForCall := fake.createOrUpdateArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) CreateOrUpdateReturns(result1 error) { + fake.createOrUpdateMutex.Lock() + defer fake.createOrUpdateMutex.Unlock() + fake.CreateOrUpdateStub = nil + fake.createOrUpdateReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) CreateOrUpdateReturnsOnCall(i int, result1 error) { + fake.createOrUpdateMutex.Lock() + defer fake.createOrUpdateMutex.Unlock() + fake.CreateOrUpdateStub = nil + if fake.createOrUpdateReturnsOnCall == nil { + fake.createOrUpdateReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.createOrUpdateReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) Delete(arg1 context.Context, arg2 client.Object, arg3 ...client.DeleteOption) error { + fake.deleteMutex.Lock() + ret, specificReturn := fake.deleteReturnsOnCall[len(fake.deleteArgsForCall)] + fake.deleteArgsForCall = append(fake.deleteArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 []client.DeleteOption + }{arg1, arg2, arg3}) + stub := fake.DeleteStub + fakeReturns := fake.deleteReturns + fake.recordInvocation("Delete", []interface{}{arg1, arg2, arg3}) + fake.deleteMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) DeleteCallCount() int { + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + return len(fake.deleteArgsForCall) +} + +func (fake *Client) DeleteCalls(stub func(context.Context, client.Object, ...client.DeleteOption) error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = stub +} + +func (fake *Client) DeleteArgsForCall(i int) (context.Context, client.Object, []client.DeleteOption) { + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + argsForCall := fake.deleteArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) DeleteReturns(result1 error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = nil + fake.deleteReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) DeleteReturnsOnCall(i int, result1 error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = nil + if fake.deleteReturnsOnCall == nil { + fake.deleteReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.deleteReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) Get(arg1 context.Context, arg2 types.NamespacedName, arg3 client.Object) error { + fake.getMutex.Lock() + ret, specificReturn := fake.getReturnsOnCall[len(fake.getArgsForCall)] + fake.getArgsForCall = append(fake.getArgsForCall, struct { + arg1 context.Context + arg2 types.NamespacedName + arg3 client.Object + }{arg1, arg2, arg3}) + stub := fake.GetStub + fakeReturns := fake.getReturns + fake.recordInvocation("Get", []interface{}{arg1, arg2, arg3}) + fake.getMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) GetCallCount() int { + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + return len(fake.getArgsForCall) +} + +func (fake *Client) GetCalls(stub func(context.Context, types.NamespacedName, client.Object) error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = stub +} + +func (fake *Client) GetArgsForCall(i int) (context.Context, types.NamespacedName, client.Object) { + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + argsForCall := fake.getArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) GetReturns(result1 error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = nil + fake.getReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) GetReturnsOnCall(i int, result1 error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = nil + if fake.getReturnsOnCall == nil { + fake.getReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.getReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) List(arg1 context.Context, arg2 client.ObjectList, arg3 ...client.ListOption) error { + fake.listMutex.Lock() + ret, specificReturn := fake.listReturnsOnCall[len(fake.listArgsForCall)] + fake.listArgsForCall = append(fake.listArgsForCall, struct { + arg1 context.Context + arg2 client.ObjectList + arg3 []client.ListOption + }{arg1, arg2, arg3}) + stub := fake.ListStub + fakeReturns := fake.listReturns + fake.recordInvocation("List", []interface{}{arg1, arg2, arg3}) + fake.listMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) ListCallCount() int { + fake.listMutex.RLock() + defer fake.listMutex.RUnlock() + return len(fake.listArgsForCall) +} + +func (fake *Client) ListCalls(stub func(context.Context, client.ObjectList, ...client.ListOption) error) { + fake.listMutex.Lock() + defer fake.listMutex.Unlock() + fake.ListStub = stub +} + +func (fake *Client) ListArgsForCall(i int) (context.Context, client.ObjectList, []client.ListOption) { + fake.listMutex.RLock() + defer fake.listMutex.RUnlock() + argsForCall := fake.listArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) ListReturns(result1 error) { + fake.listMutex.Lock() + defer fake.listMutex.Unlock() + fake.ListStub = nil + fake.listReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) ListReturnsOnCall(i int, result1 error) { + fake.listMutex.Lock() + defer fake.listMutex.Unlock() + fake.ListStub = nil + if fake.listReturnsOnCall == nil { + fake.listReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.listReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) Patch(arg1 context.Context, arg2 client.Object, arg3 client.Patch, arg4 ...controllerclient.PatchOption) error { + fake.patchMutex.Lock() + ret, specificReturn := fake.patchReturnsOnCall[len(fake.patchArgsForCall)] + fake.patchArgsForCall = append(fake.patchArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 client.Patch + arg4 []controllerclient.PatchOption + }{arg1, arg2, arg3, arg4}) + stub := fake.PatchStub + fakeReturns := fake.patchReturns + fake.recordInvocation("Patch", []interface{}{arg1, arg2, arg3, arg4}) + fake.patchMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3, arg4...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) PatchCallCount() int { + fake.patchMutex.RLock() + defer fake.patchMutex.RUnlock() + return len(fake.patchArgsForCall) +} + +func (fake *Client) PatchCalls(stub func(context.Context, client.Object, client.Patch, ...controllerclient.PatchOption) error) { + fake.patchMutex.Lock() + defer fake.patchMutex.Unlock() + fake.PatchStub = stub +} + +func (fake *Client) PatchArgsForCall(i int) (context.Context, client.Object, client.Patch, []controllerclient.PatchOption) { + fake.patchMutex.RLock() + defer fake.patchMutex.RUnlock() + argsForCall := fake.patchArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4 +} + +func (fake *Client) PatchReturns(result1 error) { + fake.patchMutex.Lock() + defer fake.patchMutex.Unlock() + fake.PatchStub = nil + fake.patchReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) PatchReturnsOnCall(i int, result1 error) { + fake.patchMutex.Lock() + defer fake.patchMutex.Unlock() + fake.PatchStub = nil + if fake.patchReturnsOnCall == nil { + fake.patchReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.patchReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) PatchStatus(arg1 context.Context, arg2 client.Object, arg3 client.Patch, arg4 ...controllerclient.PatchOption) error { + fake.patchStatusMutex.Lock() + ret, specificReturn := fake.patchStatusReturnsOnCall[len(fake.patchStatusArgsForCall)] + fake.patchStatusArgsForCall = append(fake.patchStatusArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 client.Patch + arg4 []controllerclient.PatchOption + }{arg1, arg2, arg3, arg4}) + stub := fake.PatchStatusStub + fakeReturns := fake.patchStatusReturns + fake.recordInvocation("PatchStatus", []interface{}{arg1, arg2, arg3, arg4}) + fake.patchStatusMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3, arg4...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) PatchStatusCallCount() int { + fake.patchStatusMutex.RLock() + defer fake.patchStatusMutex.RUnlock() + return len(fake.patchStatusArgsForCall) +} + +func (fake *Client) PatchStatusCalls(stub func(context.Context, client.Object, client.Patch, ...controllerclient.PatchOption) error) { + fake.patchStatusMutex.Lock() + defer fake.patchStatusMutex.Unlock() + fake.PatchStatusStub = stub +} + +func (fake *Client) PatchStatusArgsForCall(i int) (context.Context, client.Object, client.Patch, []controllerclient.PatchOption) { + fake.patchStatusMutex.RLock() + defer fake.patchStatusMutex.RUnlock() + argsForCall := fake.patchStatusArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4 +} + +func (fake *Client) PatchStatusReturns(result1 error) { + fake.patchStatusMutex.Lock() + defer fake.patchStatusMutex.Unlock() + fake.PatchStatusStub = nil + fake.patchStatusReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) PatchStatusReturnsOnCall(i int, result1 error) { + fake.patchStatusMutex.Lock() + defer fake.patchStatusMutex.Unlock() + fake.PatchStatusStub = nil + if fake.patchStatusReturnsOnCall == nil { + fake.patchStatusReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.patchStatusReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) Update(arg1 context.Context, arg2 client.Object, arg3 ...controllerclient.UpdateOption) error { + fake.updateMutex.Lock() + ret, specificReturn := fake.updateReturnsOnCall[len(fake.updateArgsForCall)] + fake.updateArgsForCall = append(fake.updateArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 []controllerclient.UpdateOption + }{arg1, arg2, arg3}) + stub := fake.UpdateStub + fakeReturns := fake.updateReturns + fake.recordInvocation("Update", []interface{}{arg1, arg2, arg3}) + fake.updateMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) UpdateCallCount() int { + fake.updateMutex.RLock() + defer fake.updateMutex.RUnlock() + return len(fake.updateArgsForCall) +} + +func (fake *Client) UpdateCalls(stub func(context.Context, client.Object, ...controllerclient.UpdateOption) error) { + fake.updateMutex.Lock() + defer fake.updateMutex.Unlock() + fake.UpdateStub = stub +} + +func (fake *Client) UpdateArgsForCall(i int) (context.Context, client.Object, []controllerclient.UpdateOption) { + fake.updateMutex.RLock() + defer fake.updateMutex.RUnlock() + argsForCall := fake.updateArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) UpdateReturns(result1 error) { + fake.updateMutex.Lock() + defer fake.updateMutex.Unlock() + fake.UpdateStub = nil + fake.updateReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) UpdateReturnsOnCall(i int, result1 error) { + fake.updateMutex.Lock() + defer fake.updateMutex.Unlock() + fake.UpdateStub = nil + if fake.updateReturnsOnCall == nil { + fake.updateReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.updateReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) UpdateStatus(arg1 context.Context, arg2 client.Object, arg3 ...client.UpdateOption) error { + fake.updateStatusMutex.Lock() + ret, specificReturn := fake.updateStatusReturnsOnCall[len(fake.updateStatusArgsForCall)] + fake.updateStatusArgsForCall = append(fake.updateStatusArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 []client.UpdateOption + }{arg1, arg2, arg3}) + stub := fake.UpdateStatusStub + fakeReturns := fake.updateStatusReturns + fake.recordInvocation("UpdateStatus", []interface{}{arg1, arg2, arg3}) + fake.updateStatusMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) UpdateStatusCallCount() int { + fake.updateStatusMutex.RLock() + defer fake.updateStatusMutex.RUnlock() + return len(fake.updateStatusArgsForCall) +} + +func (fake *Client) UpdateStatusCalls(stub func(context.Context, client.Object, ...client.UpdateOption) error) { + fake.updateStatusMutex.Lock() + defer fake.updateStatusMutex.Unlock() + fake.UpdateStatusStub = stub +} + +func (fake *Client) UpdateStatusArgsForCall(i int) (context.Context, client.Object, []client.UpdateOption) { + fake.updateStatusMutex.RLock() + defer fake.updateStatusMutex.RUnlock() + argsForCall := fake.updateStatusArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) UpdateStatusReturns(result1 error) { + fake.updateStatusMutex.Lock() + defer fake.updateStatusMutex.Unlock() + fake.UpdateStatusStub = nil + fake.updateStatusReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) UpdateStatusReturnsOnCall(i int, result1 error) { + fake.updateStatusMutex.Lock() + defer fake.updateStatusMutex.Unlock() + fake.UpdateStatusStub = nil + if fake.updateStatusReturnsOnCall == nil { + fake.updateStatusReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.updateStatusReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.createMutex.RLock() + defer fake.createMutex.RUnlock() + fake.createOrUpdateMutex.RLock() + defer fake.createOrUpdateMutex.RUnlock() + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + fake.listMutex.RLock() + defer fake.listMutex.RUnlock() + fake.patchMutex.RLock() + defer fake.patchMutex.RUnlock() + fake.patchStatusMutex.RLock() + defer fake.patchStatusMutex.RUnlock() + fake.updateMutex.RLock() + defer fake.updateMutex.RUnlock() + fake.updateStatusMutex.RLock() + defer fake.updateStatusMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *Client) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ controllerclient.Client = new(Client) diff --git a/pkg/initializer/ca/mocks/config.go b/pkg/initializer/ca/mocks/config.go new file mode 100644 index 00000000..21138015 --- /dev/null +++ b/pkg/initializer/ca/mocks/config.go @@ -0,0 +1,701 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package mocks + +import ( + "sync" + + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/ca/v1" + initializer "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca/config" +) + +type CAConfig struct { + GetHomeDirStub func() string + getHomeDirMutex sync.RWMutex + getHomeDirArgsForCall []struct { + } + getHomeDirReturns struct { + result1 string + } + getHomeDirReturnsOnCall map[int]struct { + result1 string + } + GetServerConfigStub func() *v1.ServerConfig + getServerConfigMutex sync.RWMutex + getServerConfigArgsForCall []struct { + } + getServerConfigReturns struct { + result1 *v1.ServerConfig + } + getServerConfigReturnsOnCall map[int]struct { + result1 *v1.ServerConfig + } + ParseCABlockStub func() (map[string][]byte, error) + parseCABlockMutex sync.RWMutex + parseCABlockArgsForCall []struct { + } + parseCABlockReturns struct { + result1 map[string][]byte + result2 error + } + parseCABlockReturnsOnCall map[int]struct { + result1 map[string][]byte + result2 error + } + ParseDBBlockStub func() (map[string][]byte, error) + parseDBBlockMutex sync.RWMutex + parseDBBlockArgsForCall []struct { + } + parseDBBlockReturns struct { + result1 map[string][]byte + result2 error + } + parseDBBlockReturnsOnCall map[int]struct { + result1 map[string][]byte + result2 error + } + ParseIntermediateBlockStub func() (map[string][]byte, error) + parseIntermediateBlockMutex sync.RWMutex + parseIntermediateBlockArgsForCall []struct { + } + parseIntermediateBlockReturns struct { + result1 map[string][]byte + result2 error + } + parseIntermediateBlockReturnsOnCall map[int]struct { + result1 map[string][]byte + result2 error + } + ParseOperationsBlockStub func() (map[string][]byte, error) + parseOperationsBlockMutex sync.RWMutex + parseOperationsBlockArgsForCall []struct { + } + parseOperationsBlockReturns struct { + result1 map[string][]byte + result2 error + } + parseOperationsBlockReturnsOnCall map[int]struct { + result1 map[string][]byte + result2 error + } + ParseTLSBlockStub func() (map[string][]byte, error) + parseTLSBlockMutex sync.RWMutex + parseTLSBlockArgsForCall []struct { + } + parseTLSBlockReturns struct { + result1 map[string][]byte + result2 error + } + parseTLSBlockReturnsOnCall map[int]struct { + result1 map[string][]byte + result2 error + } + SetMountPathsStub func(config.Type) + setMountPathsMutex sync.RWMutex + setMountPathsArgsForCall []struct { + arg1 config.Type + } + SetServerConfigStub func(*v1.ServerConfig) + setServerConfigMutex sync.RWMutex + setServerConfigArgsForCall []struct { + arg1 *v1.ServerConfig + } + SetUpdateStub func(bool) + setUpdateMutex sync.RWMutex + setUpdateArgsForCall []struct { + arg1 bool + } + UsingPKCS11Stub func() bool + usingPKCS11Mutex sync.RWMutex + usingPKCS11ArgsForCall []struct { + } + usingPKCS11Returns struct { + result1 bool + } + usingPKCS11ReturnsOnCall map[int]struct { + result1 bool + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *CAConfig) GetHomeDir() string { + fake.getHomeDirMutex.Lock() + ret, specificReturn := fake.getHomeDirReturnsOnCall[len(fake.getHomeDirArgsForCall)] + fake.getHomeDirArgsForCall = append(fake.getHomeDirArgsForCall, struct { + }{}) + stub := fake.GetHomeDirStub + fakeReturns := fake.getHomeDirReturns + fake.recordInvocation("GetHomeDir", []interface{}{}) + fake.getHomeDirMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} 
+ +func (fake *CAConfig) GetHomeDirCallCount() int { + fake.getHomeDirMutex.RLock() + defer fake.getHomeDirMutex.RUnlock() + return len(fake.getHomeDirArgsForCall) +} + +func (fake *CAConfig) GetHomeDirCalls(stub func() string) { + fake.getHomeDirMutex.Lock() + defer fake.getHomeDirMutex.Unlock() + fake.GetHomeDirStub = stub +} + +func (fake *CAConfig) GetHomeDirReturns(result1 string) { + fake.getHomeDirMutex.Lock() + defer fake.getHomeDirMutex.Unlock() + fake.GetHomeDirStub = nil + fake.getHomeDirReturns = struct { + result1 string + }{result1} +} + +func (fake *CAConfig) GetHomeDirReturnsOnCall(i int, result1 string) { + fake.getHomeDirMutex.Lock() + defer fake.getHomeDirMutex.Unlock() + fake.GetHomeDirStub = nil + if fake.getHomeDirReturnsOnCall == nil { + fake.getHomeDirReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getHomeDirReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *CAConfig) GetServerConfig() *v1.ServerConfig { + fake.getServerConfigMutex.Lock() + ret, specificReturn := fake.getServerConfigReturnsOnCall[len(fake.getServerConfigArgsForCall)] + fake.getServerConfigArgsForCall = append(fake.getServerConfigArgsForCall, struct { + }{}) + stub := fake.GetServerConfigStub + fakeReturns := fake.getServerConfigReturns + fake.recordInvocation("GetServerConfig", []interface{}{}) + fake.getServerConfigMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *CAConfig) GetServerConfigCallCount() int { + fake.getServerConfigMutex.RLock() + defer fake.getServerConfigMutex.RUnlock() + return len(fake.getServerConfigArgsForCall) +} + +func (fake *CAConfig) GetServerConfigCalls(stub func() *v1.ServerConfig) { + fake.getServerConfigMutex.Lock() + defer fake.getServerConfigMutex.Unlock() + fake.GetServerConfigStub = stub +} + +func (fake *CAConfig) GetServerConfigReturns(result1 *v1.ServerConfig) { + fake.getServerConfigMutex.Lock() + defer fake.getServerConfigMutex.Unlock() + fake.GetServerConfigStub = nil + fake.getServerConfigReturns = struct { + result1 *v1.ServerConfig + }{result1} +} + +func (fake *CAConfig) GetServerConfigReturnsOnCall(i int, result1 *v1.ServerConfig) { + fake.getServerConfigMutex.Lock() + defer fake.getServerConfigMutex.Unlock() + fake.GetServerConfigStub = nil + if fake.getServerConfigReturnsOnCall == nil { + fake.getServerConfigReturnsOnCall = make(map[int]struct { + result1 *v1.ServerConfig + }) + } + fake.getServerConfigReturnsOnCall[i] = struct { + result1 *v1.ServerConfig + }{result1} +} + +func (fake *CAConfig) ParseCABlock() (map[string][]byte, error) { + fake.parseCABlockMutex.Lock() + ret, specificReturn := fake.parseCABlockReturnsOnCall[len(fake.parseCABlockArgsForCall)] + fake.parseCABlockArgsForCall = append(fake.parseCABlockArgsForCall, struct { + }{}) + stub := fake.ParseCABlockStub + fakeReturns := fake.parseCABlockReturns + fake.recordInvocation("ParseCABlock", []interface{}{}) + fake.parseCABlockMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *CAConfig) ParseCABlockCallCount() int { + fake.parseCABlockMutex.RLock() + defer fake.parseCABlockMutex.RUnlock() + return len(fake.parseCABlockArgsForCall) +} + +func (fake *CAConfig) ParseCABlockCalls(stub func() (map[string][]byte, error)) { + fake.parseCABlockMutex.Lock() + defer fake.parseCABlockMutex.Unlock() + fake.ParseCABlockStub = stub 
+} + +func (fake *CAConfig) ParseCABlockReturns(result1 map[string][]byte, result2 error) { + fake.parseCABlockMutex.Lock() + defer fake.parseCABlockMutex.Unlock() + fake.ParseCABlockStub = nil + fake.parseCABlockReturns = struct { + result1 map[string][]byte + result2 error + }{result1, result2} +} + +func (fake *CAConfig) ParseCABlockReturnsOnCall(i int, result1 map[string][]byte, result2 error) { + fake.parseCABlockMutex.Lock() + defer fake.parseCABlockMutex.Unlock() + fake.ParseCABlockStub = nil + if fake.parseCABlockReturnsOnCall == nil { + fake.parseCABlockReturnsOnCall = make(map[int]struct { + result1 map[string][]byte + result2 error + }) + } + fake.parseCABlockReturnsOnCall[i] = struct { + result1 map[string][]byte + result2 error + }{result1, result2} +} + +func (fake *CAConfig) ParseDBBlock() (map[string][]byte, error) { + fake.parseDBBlockMutex.Lock() + ret, specificReturn := fake.parseDBBlockReturnsOnCall[len(fake.parseDBBlockArgsForCall)] + fake.parseDBBlockArgsForCall = append(fake.parseDBBlockArgsForCall, struct { + }{}) + stub := fake.ParseDBBlockStub + fakeReturns := fake.parseDBBlockReturns + fake.recordInvocation("ParseDBBlock", []interface{}{}) + fake.parseDBBlockMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *CAConfig) ParseDBBlockCallCount() int { + fake.parseDBBlockMutex.RLock() + defer fake.parseDBBlockMutex.RUnlock() + return len(fake.parseDBBlockArgsForCall) +} + +func (fake *CAConfig) ParseDBBlockCalls(stub func() (map[string][]byte, error)) { + fake.parseDBBlockMutex.Lock() + defer fake.parseDBBlockMutex.Unlock() + fake.ParseDBBlockStub = stub +} + +func (fake *CAConfig) ParseDBBlockReturns(result1 map[string][]byte, result2 error) { + fake.parseDBBlockMutex.Lock() + defer fake.parseDBBlockMutex.Unlock() + fake.ParseDBBlockStub = nil + fake.parseDBBlockReturns = struct { + result1 map[string][]byte + result2 error + }{result1, result2} +} + +func (fake *CAConfig) ParseDBBlockReturnsOnCall(i int, result1 map[string][]byte, result2 error) { + fake.parseDBBlockMutex.Lock() + defer fake.parseDBBlockMutex.Unlock() + fake.ParseDBBlockStub = nil + if fake.parseDBBlockReturnsOnCall == nil { + fake.parseDBBlockReturnsOnCall = make(map[int]struct { + result1 map[string][]byte + result2 error + }) + } + fake.parseDBBlockReturnsOnCall[i] = struct { + result1 map[string][]byte + result2 error + }{result1, result2} +} + +func (fake *CAConfig) ParseIntermediateBlock() (map[string][]byte, error) { + fake.parseIntermediateBlockMutex.Lock() + ret, specificReturn := fake.parseIntermediateBlockReturnsOnCall[len(fake.parseIntermediateBlockArgsForCall)] + fake.parseIntermediateBlockArgsForCall = append(fake.parseIntermediateBlockArgsForCall, struct { + }{}) + stub := fake.ParseIntermediateBlockStub + fakeReturns := fake.parseIntermediateBlockReturns + fake.recordInvocation("ParseIntermediateBlock", []interface{}{}) + fake.parseIntermediateBlockMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *CAConfig) ParseIntermediateBlockCallCount() int { + fake.parseIntermediateBlockMutex.RLock() + defer fake.parseIntermediateBlockMutex.RUnlock() + return len(fake.parseIntermediateBlockArgsForCall) +} + +func (fake *CAConfig) ParseIntermediateBlockCalls(stub func() (map[string][]byte, error)) { + 
fake.parseIntermediateBlockMutex.Lock() + defer fake.parseIntermediateBlockMutex.Unlock() + fake.ParseIntermediateBlockStub = stub +} + +func (fake *CAConfig) ParseIntermediateBlockReturns(result1 map[string][]byte, result2 error) { + fake.parseIntermediateBlockMutex.Lock() + defer fake.parseIntermediateBlockMutex.Unlock() + fake.ParseIntermediateBlockStub = nil + fake.parseIntermediateBlockReturns = struct { + result1 map[string][]byte + result2 error + }{result1, result2} +} + +func (fake *CAConfig) ParseIntermediateBlockReturnsOnCall(i int, result1 map[string][]byte, result2 error) { + fake.parseIntermediateBlockMutex.Lock() + defer fake.parseIntermediateBlockMutex.Unlock() + fake.ParseIntermediateBlockStub = nil + if fake.parseIntermediateBlockReturnsOnCall == nil { + fake.parseIntermediateBlockReturnsOnCall = make(map[int]struct { + result1 map[string][]byte + result2 error + }) + } + fake.parseIntermediateBlockReturnsOnCall[i] = struct { + result1 map[string][]byte + result2 error + }{result1, result2} +} + +func (fake *CAConfig) ParseOperationsBlock() (map[string][]byte, error) { + fake.parseOperationsBlockMutex.Lock() + ret, specificReturn := fake.parseOperationsBlockReturnsOnCall[len(fake.parseOperationsBlockArgsForCall)] + fake.parseOperationsBlockArgsForCall = append(fake.parseOperationsBlockArgsForCall, struct { + }{}) + stub := fake.ParseOperationsBlockStub + fakeReturns := fake.parseOperationsBlockReturns + fake.recordInvocation("ParseOperationsBlock", []interface{}{}) + fake.parseOperationsBlockMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *CAConfig) ParseOperationsBlockCallCount() int { + fake.parseOperationsBlockMutex.RLock() + defer fake.parseOperationsBlockMutex.RUnlock() + return len(fake.parseOperationsBlockArgsForCall) +} + +func (fake *CAConfig) ParseOperationsBlockCalls(stub func() (map[string][]byte, error)) { + fake.parseOperationsBlockMutex.Lock() + defer fake.parseOperationsBlockMutex.Unlock() + fake.ParseOperationsBlockStub = stub +} + +func (fake *CAConfig) ParseOperationsBlockReturns(result1 map[string][]byte, result2 error) { + fake.parseOperationsBlockMutex.Lock() + defer fake.parseOperationsBlockMutex.Unlock() + fake.ParseOperationsBlockStub = nil + fake.parseOperationsBlockReturns = struct { + result1 map[string][]byte + result2 error + }{result1, result2} +} + +func (fake *CAConfig) ParseOperationsBlockReturnsOnCall(i int, result1 map[string][]byte, result2 error) { + fake.parseOperationsBlockMutex.Lock() + defer fake.parseOperationsBlockMutex.Unlock() + fake.ParseOperationsBlockStub = nil + if fake.parseOperationsBlockReturnsOnCall == nil { + fake.parseOperationsBlockReturnsOnCall = make(map[int]struct { + result1 map[string][]byte + result2 error + }) + } + fake.parseOperationsBlockReturnsOnCall[i] = struct { + result1 map[string][]byte + result2 error + }{result1, result2} +} + +func (fake *CAConfig) ParseTLSBlock() (map[string][]byte, error) { + fake.parseTLSBlockMutex.Lock() + ret, specificReturn := fake.parseTLSBlockReturnsOnCall[len(fake.parseTLSBlockArgsForCall)] + fake.parseTLSBlockArgsForCall = append(fake.parseTLSBlockArgsForCall, struct { + }{}) + stub := fake.ParseTLSBlockStub + fakeReturns := fake.parseTLSBlockReturns + fake.recordInvocation("ParseTLSBlock", []interface{}{}) + fake.parseTLSBlockMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1, ret.result2 + } + 
return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *CAConfig) ParseTLSBlockCallCount() int { + fake.parseTLSBlockMutex.RLock() + defer fake.parseTLSBlockMutex.RUnlock() + return len(fake.parseTLSBlockArgsForCall) +} + +func (fake *CAConfig) ParseTLSBlockCalls(stub func() (map[string][]byte, error)) { + fake.parseTLSBlockMutex.Lock() + defer fake.parseTLSBlockMutex.Unlock() + fake.ParseTLSBlockStub = stub +} + +func (fake *CAConfig) ParseTLSBlockReturns(result1 map[string][]byte, result2 error) { + fake.parseTLSBlockMutex.Lock() + defer fake.parseTLSBlockMutex.Unlock() + fake.ParseTLSBlockStub = nil + fake.parseTLSBlockReturns = struct { + result1 map[string][]byte + result2 error + }{result1, result2} +} + +func (fake *CAConfig) ParseTLSBlockReturnsOnCall(i int, result1 map[string][]byte, result2 error) { + fake.parseTLSBlockMutex.Lock() + defer fake.parseTLSBlockMutex.Unlock() + fake.ParseTLSBlockStub = nil + if fake.parseTLSBlockReturnsOnCall == nil { + fake.parseTLSBlockReturnsOnCall = make(map[int]struct { + result1 map[string][]byte + result2 error + }) + } + fake.parseTLSBlockReturnsOnCall[i] = struct { + result1 map[string][]byte + result2 error + }{result1, result2} +} + +func (fake *CAConfig) SetMountPaths(arg1 config.Type) { + fake.setMountPathsMutex.Lock() + fake.setMountPathsArgsForCall = append(fake.setMountPathsArgsForCall, struct { + arg1 config.Type + }{arg1}) + stub := fake.SetMountPathsStub + fake.recordInvocation("SetMountPaths", []interface{}{arg1}) + fake.setMountPathsMutex.Unlock() + if stub != nil { + fake.SetMountPathsStub(arg1) + } +} + +func (fake *CAConfig) SetMountPathsCallCount() int { + fake.setMountPathsMutex.RLock() + defer fake.setMountPathsMutex.RUnlock() + return len(fake.setMountPathsArgsForCall) +} + +func (fake *CAConfig) SetMountPathsCalls(stub func(config.Type)) { + fake.setMountPathsMutex.Lock() + defer fake.setMountPathsMutex.Unlock() + fake.SetMountPathsStub = stub +} + +func (fake *CAConfig) SetMountPathsArgsForCall(i int) config.Type { + fake.setMountPathsMutex.RLock() + defer fake.setMountPathsMutex.RUnlock() + argsForCall := fake.setMountPathsArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *CAConfig) SetServerConfig(arg1 *v1.ServerConfig) { + fake.setServerConfigMutex.Lock() + fake.setServerConfigArgsForCall = append(fake.setServerConfigArgsForCall, struct { + arg1 *v1.ServerConfig + }{arg1}) + stub := fake.SetServerConfigStub + fake.recordInvocation("SetServerConfig", []interface{}{arg1}) + fake.setServerConfigMutex.Unlock() + if stub != nil { + fake.SetServerConfigStub(arg1) + } +} + +func (fake *CAConfig) SetServerConfigCallCount() int { + fake.setServerConfigMutex.RLock() + defer fake.setServerConfigMutex.RUnlock() + return len(fake.setServerConfigArgsForCall) +} + +func (fake *CAConfig) SetServerConfigCalls(stub func(*v1.ServerConfig)) { + fake.setServerConfigMutex.Lock() + defer fake.setServerConfigMutex.Unlock() + fake.SetServerConfigStub = stub +} + +func (fake *CAConfig) SetServerConfigArgsForCall(i int) *v1.ServerConfig { + fake.setServerConfigMutex.RLock() + defer fake.setServerConfigMutex.RUnlock() + argsForCall := fake.setServerConfigArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *CAConfig) SetUpdate(arg1 bool) { + fake.setUpdateMutex.Lock() + fake.setUpdateArgsForCall = append(fake.setUpdateArgsForCall, struct { + arg1 bool + }{arg1}) + stub := fake.SetUpdateStub + fake.recordInvocation("SetUpdate", []interface{}{arg1}) + fake.setUpdateMutex.Unlock() + if stub != nil { + fake.SetUpdateStub(arg1) 
+ } +} + +func (fake *CAConfig) SetUpdateCallCount() int { + fake.setUpdateMutex.RLock() + defer fake.setUpdateMutex.RUnlock() + return len(fake.setUpdateArgsForCall) +} + +func (fake *CAConfig) SetUpdateCalls(stub func(bool)) { + fake.setUpdateMutex.Lock() + defer fake.setUpdateMutex.Unlock() + fake.SetUpdateStub = stub +} + +func (fake *CAConfig) SetUpdateArgsForCall(i int) bool { + fake.setUpdateMutex.RLock() + defer fake.setUpdateMutex.RUnlock() + argsForCall := fake.setUpdateArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *CAConfig) UsingPKCS11() bool { + fake.usingPKCS11Mutex.Lock() + ret, specificReturn := fake.usingPKCS11ReturnsOnCall[len(fake.usingPKCS11ArgsForCall)] + fake.usingPKCS11ArgsForCall = append(fake.usingPKCS11ArgsForCall, struct { + }{}) + stub := fake.UsingPKCS11Stub + fakeReturns := fake.usingPKCS11Returns + fake.recordInvocation("UsingPKCS11", []interface{}{}) + fake.usingPKCS11Mutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *CAConfig) UsingPKCS11CallCount() int { + fake.usingPKCS11Mutex.RLock() + defer fake.usingPKCS11Mutex.RUnlock() + return len(fake.usingPKCS11ArgsForCall) +} + +func (fake *CAConfig) UsingPKCS11Calls(stub func() bool) { + fake.usingPKCS11Mutex.Lock() + defer fake.usingPKCS11Mutex.Unlock() + fake.UsingPKCS11Stub = stub +} + +func (fake *CAConfig) UsingPKCS11Returns(result1 bool) { + fake.usingPKCS11Mutex.Lock() + defer fake.usingPKCS11Mutex.Unlock() + fake.UsingPKCS11Stub = nil + fake.usingPKCS11Returns = struct { + result1 bool + }{result1} +} + +func (fake *CAConfig) UsingPKCS11ReturnsOnCall(i int, result1 bool) { + fake.usingPKCS11Mutex.Lock() + defer fake.usingPKCS11Mutex.Unlock() + fake.UsingPKCS11Stub = nil + if fake.usingPKCS11ReturnsOnCall == nil { + fake.usingPKCS11ReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.usingPKCS11ReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *CAConfig) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.getHomeDirMutex.RLock() + defer fake.getHomeDirMutex.RUnlock() + fake.getServerConfigMutex.RLock() + defer fake.getServerConfigMutex.RUnlock() + fake.parseCABlockMutex.RLock() + defer fake.parseCABlockMutex.RUnlock() + fake.parseDBBlockMutex.RLock() + defer fake.parseDBBlockMutex.RUnlock() + fake.parseIntermediateBlockMutex.RLock() + defer fake.parseIntermediateBlockMutex.RUnlock() + fake.parseOperationsBlockMutex.RLock() + defer fake.parseOperationsBlockMutex.RUnlock() + fake.parseTLSBlockMutex.RLock() + defer fake.parseTLSBlockMutex.RUnlock() + fake.setMountPathsMutex.RLock() + defer fake.setMountPathsMutex.RUnlock() + fake.setServerConfigMutex.RLock() + defer fake.setServerConfigMutex.RUnlock() + fake.setUpdateMutex.RLock() + defer fake.setUpdateMutex.RUnlock() + fake.usingPKCS11Mutex.RLock() + defer fake.usingPKCS11Mutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *CAConfig) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ 
initializer.CAConfig = new(CAConfig) diff --git a/pkg/initializer/ca/mocks/ibpca.go b/pkg/initializer/ca/mocks/ibpca.go new file mode 100644 index 00000000..b010ab59 --- /dev/null +++ b/pkg/initializer/ca/mocks/ibpca.go @@ -0,0 +1,853 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package mocks + +import ( + "sync" + + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/ca/v1" + initializer "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca/config" + "github.com/hyperledger/fabric-ca/lib" +) + +type IBPCA struct { + ConfigToBytesStub func() ([]byte, error) + configToBytesMutex sync.RWMutex + configToBytesArgsForCall []struct { + } + configToBytesReturns struct { + result1 []byte + result2 error + } + configToBytesReturnsOnCall map[int]struct { + result1 []byte + result2 error + } + GetHomeDirStub func() string + getHomeDirMutex sync.RWMutex + getHomeDirArgsForCall []struct { + } + getHomeDirReturns struct { + result1 string + } + getHomeDirReturnsOnCall map[int]struct { + result1 string + } + GetServerConfigStub func() *v1.ServerConfig + getServerConfigMutex sync.RWMutex + getServerConfigArgsForCall []struct { + } + getServerConfigReturns struct { + result1 *v1.ServerConfig + } + getServerConfigReturnsOnCall map[int]struct { + result1 *v1.ServerConfig + } + GetTypeStub func() config.Type + getTypeMutex sync.RWMutex + getTypeArgsForCall []struct { + } + getTypeReturns struct { + result1 config.Type + } + getTypeReturnsOnCall map[int]struct { + result1 config.Type + } + InitStub func() error + initMutex sync.RWMutex + initArgsForCall []struct { + } + initReturns struct { + result1 error + } + initReturnsOnCall map[int]struct { + result1 error + } + IsBeingUpdatedStub func() + isBeingUpdatedMutex sync.RWMutex + isBeingUpdatedArgsForCall []struct { + } + OverrideServerConfigStub func(*v1.ServerConfig) error + overrideServerConfigMutex sync.RWMutex + overrideServerConfigArgsForCall []struct { + arg1 *v1.ServerConfig + } + overrideServerConfigReturns struct { + result1 error + } + overrideServerConfigReturnsOnCall map[int]struct { + result1 error + } + ParseCABlockStub func() (map[string][]byte, error) + parseCABlockMutex sync.RWMutex + parseCABlockArgsForCall []struct { + } + parseCABlockReturns struct { + result1 map[string][]byte + result2 error + } + parseCABlockReturnsOnCall map[int]struct { + result1 map[string][]byte + result2 error + } + ParseCryptoStub func() (map[string][]byte, error) + parseCryptoMutex sync.RWMutex + parseCryptoArgsForCall []struct { + } + parseCryptoReturns struct { + result1 map[string][]byte + result2 error + } + parseCryptoReturnsOnCall map[int]struct { + result1 map[string][]byte + result2 error + } + RemoveHomeDirStub func() error + removeHomeDirMutex sync.RWMutex + removeHomeDirArgsForCall []struct { + } + removeHomeDirReturns struct { + result1 error + } + removeHomeDirReturnsOnCall map[int]struct { + result1 error + } + SetMountPathsStub func() + setMountPathsMutex sync.RWMutex + setMountPathsArgsForCall []struct { + } + ViperUnmarshalStub func(string) (*lib.ServerConfig, error) + viperUnmarshalMutex sync.RWMutex + viperUnmarshalArgsForCall []struct { + arg1 string + } + viperUnmarshalReturns struct { + result1 *lib.ServerConfig + result2 error + } + viperUnmarshalReturnsOnCall map[int]struct { + result1 *lib.ServerConfig + result2 error + } + WriteConfigStub func() error + writeConfigMutex sync.RWMutex + writeConfigArgsForCall []struct { + } + writeConfigReturns struct { + 
result1 error + } + writeConfigReturnsOnCall map[int]struct { + result1 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *IBPCA) ConfigToBytes() ([]byte, error) { + fake.configToBytesMutex.Lock() + ret, specificReturn := fake.configToBytesReturnsOnCall[len(fake.configToBytesArgsForCall)] + fake.configToBytesArgsForCall = append(fake.configToBytesArgsForCall, struct { + }{}) + stub := fake.ConfigToBytesStub + fakeReturns := fake.configToBytesReturns + fake.recordInvocation("ConfigToBytes", []interface{}{}) + fake.configToBytesMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *IBPCA) ConfigToBytesCallCount() int { + fake.configToBytesMutex.RLock() + defer fake.configToBytesMutex.RUnlock() + return len(fake.configToBytesArgsForCall) +} + +func (fake *IBPCA) ConfigToBytesCalls(stub func() ([]byte, error)) { + fake.configToBytesMutex.Lock() + defer fake.configToBytesMutex.Unlock() + fake.ConfigToBytesStub = stub +} + +func (fake *IBPCA) ConfigToBytesReturns(result1 []byte, result2 error) { + fake.configToBytesMutex.Lock() + defer fake.configToBytesMutex.Unlock() + fake.ConfigToBytesStub = nil + fake.configToBytesReturns = struct { + result1 []byte + result2 error + }{result1, result2} +} + +func (fake *IBPCA) ConfigToBytesReturnsOnCall(i int, result1 []byte, result2 error) { + fake.configToBytesMutex.Lock() + defer fake.configToBytesMutex.Unlock() + fake.ConfigToBytesStub = nil + if fake.configToBytesReturnsOnCall == nil { + fake.configToBytesReturnsOnCall = make(map[int]struct { + result1 []byte + result2 error + }) + } + fake.configToBytesReturnsOnCall[i] = struct { + result1 []byte + result2 error + }{result1, result2} +} + +func (fake *IBPCA) GetHomeDir() string { + fake.getHomeDirMutex.Lock() + ret, specificReturn := fake.getHomeDirReturnsOnCall[len(fake.getHomeDirArgsForCall)] + fake.getHomeDirArgsForCall = append(fake.getHomeDirArgsForCall, struct { + }{}) + stub := fake.GetHomeDirStub + fakeReturns := fake.getHomeDirReturns + fake.recordInvocation("GetHomeDir", []interface{}{}) + fake.getHomeDirMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *IBPCA) GetHomeDirCallCount() int { + fake.getHomeDirMutex.RLock() + defer fake.getHomeDirMutex.RUnlock() + return len(fake.getHomeDirArgsForCall) +} + +func (fake *IBPCA) GetHomeDirCalls(stub func() string) { + fake.getHomeDirMutex.Lock() + defer fake.getHomeDirMutex.Unlock() + fake.GetHomeDirStub = stub +} + +func (fake *IBPCA) GetHomeDirReturns(result1 string) { + fake.getHomeDirMutex.Lock() + defer fake.getHomeDirMutex.Unlock() + fake.GetHomeDirStub = nil + fake.getHomeDirReturns = struct { + result1 string + }{result1} +} + +func (fake *IBPCA) GetHomeDirReturnsOnCall(i int, result1 string) { + fake.getHomeDirMutex.Lock() + defer fake.getHomeDirMutex.Unlock() + fake.GetHomeDirStub = nil + if fake.getHomeDirReturnsOnCall == nil { + fake.getHomeDirReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getHomeDirReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *IBPCA) GetServerConfig() *v1.ServerConfig { + fake.getServerConfigMutex.Lock() + ret, specificReturn := fake.getServerConfigReturnsOnCall[len(fake.getServerConfigArgsForCall)] + fake.getServerConfigArgsForCall = append(fake.getServerConfigArgsForCall, struct { + }{}) + stub 
:= fake.GetServerConfigStub + fakeReturns := fake.getServerConfigReturns + fake.recordInvocation("GetServerConfig", []interface{}{}) + fake.getServerConfigMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *IBPCA) GetServerConfigCallCount() int { + fake.getServerConfigMutex.RLock() + defer fake.getServerConfigMutex.RUnlock() + return len(fake.getServerConfigArgsForCall) +} + +func (fake *IBPCA) GetServerConfigCalls(stub func() *v1.ServerConfig) { + fake.getServerConfigMutex.Lock() + defer fake.getServerConfigMutex.Unlock() + fake.GetServerConfigStub = stub +} + +func (fake *IBPCA) GetServerConfigReturns(result1 *v1.ServerConfig) { + fake.getServerConfigMutex.Lock() + defer fake.getServerConfigMutex.Unlock() + fake.GetServerConfigStub = nil + fake.getServerConfigReturns = struct { + result1 *v1.ServerConfig + }{result1} +} + +func (fake *IBPCA) GetServerConfigReturnsOnCall(i int, result1 *v1.ServerConfig) { + fake.getServerConfigMutex.Lock() + defer fake.getServerConfigMutex.Unlock() + fake.GetServerConfigStub = nil + if fake.getServerConfigReturnsOnCall == nil { + fake.getServerConfigReturnsOnCall = make(map[int]struct { + result1 *v1.ServerConfig + }) + } + fake.getServerConfigReturnsOnCall[i] = struct { + result1 *v1.ServerConfig + }{result1} +} + +func (fake *IBPCA) GetType() config.Type { + fake.getTypeMutex.Lock() + ret, specificReturn := fake.getTypeReturnsOnCall[len(fake.getTypeArgsForCall)] + fake.getTypeArgsForCall = append(fake.getTypeArgsForCall, struct { + }{}) + stub := fake.GetTypeStub + fakeReturns := fake.getTypeReturns + fake.recordInvocation("GetType", []interface{}{}) + fake.getTypeMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *IBPCA) GetTypeCallCount() int { + fake.getTypeMutex.RLock() + defer fake.getTypeMutex.RUnlock() + return len(fake.getTypeArgsForCall) +} + +func (fake *IBPCA) GetTypeCalls(stub func() config.Type) { + fake.getTypeMutex.Lock() + defer fake.getTypeMutex.Unlock() + fake.GetTypeStub = stub +} + +func (fake *IBPCA) GetTypeReturns(result1 config.Type) { + fake.getTypeMutex.Lock() + defer fake.getTypeMutex.Unlock() + fake.GetTypeStub = nil + fake.getTypeReturns = struct { + result1 config.Type + }{result1} +} + +func (fake *IBPCA) GetTypeReturnsOnCall(i int, result1 config.Type) { + fake.getTypeMutex.Lock() + defer fake.getTypeMutex.Unlock() + fake.GetTypeStub = nil + if fake.getTypeReturnsOnCall == nil { + fake.getTypeReturnsOnCall = make(map[int]struct { + result1 config.Type + }) + } + fake.getTypeReturnsOnCall[i] = struct { + result1 config.Type + }{result1} +} + +func (fake *IBPCA) Init() error { + fake.initMutex.Lock() + ret, specificReturn := fake.initReturnsOnCall[len(fake.initArgsForCall)] + fake.initArgsForCall = append(fake.initArgsForCall, struct { + }{}) + stub := fake.InitStub + fakeReturns := fake.initReturns + fake.recordInvocation("Init", []interface{}{}) + fake.initMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *IBPCA) InitCallCount() int { + fake.initMutex.RLock() + defer fake.initMutex.RUnlock() + return len(fake.initArgsForCall) +} + +func (fake *IBPCA) InitCalls(stub func() error) { + fake.initMutex.Lock() + defer fake.initMutex.Unlock() + fake.InitStub = stub +} + +func (fake *IBPCA) InitReturns(result1 error) { + fake.initMutex.Lock() + 
defer fake.initMutex.Unlock() + fake.InitStub = nil + fake.initReturns = struct { + result1 error + }{result1} +} + +func (fake *IBPCA) InitReturnsOnCall(i int, result1 error) { + fake.initMutex.Lock() + defer fake.initMutex.Unlock() + fake.InitStub = nil + if fake.initReturnsOnCall == nil { + fake.initReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.initReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *IBPCA) IsBeingUpdated() { + fake.isBeingUpdatedMutex.Lock() + fake.isBeingUpdatedArgsForCall = append(fake.isBeingUpdatedArgsForCall, struct { + }{}) + stub := fake.IsBeingUpdatedStub + fake.recordInvocation("IsBeingUpdated", []interface{}{}) + fake.isBeingUpdatedMutex.Unlock() + if stub != nil { + fake.IsBeingUpdatedStub() + } +} + +func (fake *IBPCA) IsBeingUpdatedCallCount() int { + fake.isBeingUpdatedMutex.RLock() + defer fake.isBeingUpdatedMutex.RUnlock() + return len(fake.isBeingUpdatedArgsForCall) +} + +func (fake *IBPCA) IsBeingUpdatedCalls(stub func()) { + fake.isBeingUpdatedMutex.Lock() + defer fake.isBeingUpdatedMutex.Unlock() + fake.IsBeingUpdatedStub = stub +} + +func (fake *IBPCA) OverrideServerConfig(arg1 *v1.ServerConfig) error { + fake.overrideServerConfigMutex.Lock() + ret, specificReturn := fake.overrideServerConfigReturnsOnCall[len(fake.overrideServerConfigArgsForCall)] + fake.overrideServerConfigArgsForCall = append(fake.overrideServerConfigArgsForCall, struct { + arg1 *v1.ServerConfig + }{arg1}) + stub := fake.OverrideServerConfigStub + fakeReturns := fake.overrideServerConfigReturns + fake.recordInvocation("OverrideServerConfig", []interface{}{arg1}) + fake.overrideServerConfigMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *IBPCA) OverrideServerConfigCallCount() int { + fake.overrideServerConfigMutex.RLock() + defer fake.overrideServerConfigMutex.RUnlock() + return len(fake.overrideServerConfigArgsForCall) +} + +func (fake *IBPCA) OverrideServerConfigCalls(stub func(*v1.ServerConfig) error) { + fake.overrideServerConfigMutex.Lock() + defer fake.overrideServerConfigMutex.Unlock() + fake.OverrideServerConfigStub = stub +} + +func (fake *IBPCA) OverrideServerConfigArgsForCall(i int) *v1.ServerConfig { + fake.overrideServerConfigMutex.RLock() + defer fake.overrideServerConfigMutex.RUnlock() + argsForCall := fake.overrideServerConfigArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *IBPCA) OverrideServerConfigReturns(result1 error) { + fake.overrideServerConfigMutex.Lock() + defer fake.overrideServerConfigMutex.Unlock() + fake.OverrideServerConfigStub = nil + fake.overrideServerConfigReturns = struct { + result1 error + }{result1} +} + +func (fake *IBPCA) OverrideServerConfigReturnsOnCall(i int, result1 error) { + fake.overrideServerConfigMutex.Lock() + defer fake.overrideServerConfigMutex.Unlock() + fake.OverrideServerConfigStub = nil + if fake.overrideServerConfigReturnsOnCall == nil { + fake.overrideServerConfigReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.overrideServerConfigReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *IBPCA) ParseCABlock() (map[string][]byte, error) { + fake.parseCABlockMutex.Lock() + ret, specificReturn := fake.parseCABlockReturnsOnCall[len(fake.parseCABlockArgsForCall)] + fake.parseCABlockArgsForCall = append(fake.parseCABlockArgsForCall, struct { + }{}) + stub := fake.ParseCABlockStub + fakeReturns := fake.parseCABlockReturns + 
fake.recordInvocation("ParseCABlock", []interface{}{}) + fake.parseCABlockMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *IBPCA) ParseCABlockCallCount() int { + fake.parseCABlockMutex.RLock() + defer fake.parseCABlockMutex.RUnlock() + return len(fake.parseCABlockArgsForCall) +} + +func (fake *IBPCA) ParseCABlockCalls(stub func() (map[string][]byte, error)) { + fake.parseCABlockMutex.Lock() + defer fake.parseCABlockMutex.Unlock() + fake.ParseCABlockStub = stub +} + +func (fake *IBPCA) ParseCABlockReturns(result1 map[string][]byte, result2 error) { + fake.parseCABlockMutex.Lock() + defer fake.parseCABlockMutex.Unlock() + fake.ParseCABlockStub = nil + fake.parseCABlockReturns = struct { + result1 map[string][]byte + result2 error + }{result1, result2} +} + +func (fake *IBPCA) ParseCABlockReturnsOnCall(i int, result1 map[string][]byte, result2 error) { + fake.parseCABlockMutex.Lock() + defer fake.parseCABlockMutex.Unlock() + fake.ParseCABlockStub = nil + if fake.parseCABlockReturnsOnCall == nil { + fake.parseCABlockReturnsOnCall = make(map[int]struct { + result1 map[string][]byte + result2 error + }) + } + fake.parseCABlockReturnsOnCall[i] = struct { + result1 map[string][]byte + result2 error + }{result1, result2} +} + +func (fake *IBPCA) ParseCrypto() (map[string][]byte, error) { + fake.parseCryptoMutex.Lock() + ret, specificReturn := fake.parseCryptoReturnsOnCall[len(fake.parseCryptoArgsForCall)] + fake.parseCryptoArgsForCall = append(fake.parseCryptoArgsForCall, struct { + }{}) + stub := fake.ParseCryptoStub + fakeReturns := fake.parseCryptoReturns + fake.recordInvocation("ParseCrypto", []interface{}{}) + fake.parseCryptoMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *IBPCA) ParseCryptoCallCount() int { + fake.parseCryptoMutex.RLock() + defer fake.parseCryptoMutex.RUnlock() + return len(fake.parseCryptoArgsForCall) +} + +func (fake *IBPCA) ParseCryptoCalls(stub func() (map[string][]byte, error)) { + fake.parseCryptoMutex.Lock() + defer fake.parseCryptoMutex.Unlock() + fake.ParseCryptoStub = stub +} + +func (fake *IBPCA) ParseCryptoReturns(result1 map[string][]byte, result2 error) { + fake.parseCryptoMutex.Lock() + defer fake.parseCryptoMutex.Unlock() + fake.ParseCryptoStub = nil + fake.parseCryptoReturns = struct { + result1 map[string][]byte + result2 error + }{result1, result2} +} + +func (fake *IBPCA) ParseCryptoReturnsOnCall(i int, result1 map[string][]byte, result2 error) { + fake.parseCryptoMutex.Lock() + defer fake.parseCryptoMutex.Unlock() + fake.ParseCryptoStub = nil + if fake.parseCryptoReturnsOnCall == nil { + fake.parseCryptoReturnsOnCall = make(map[int]struct { + result1 map[string][]byte + result2 error + }) + } + fake.parseCryptoReturnsOnCall[i] = struct { + result1 map[string][]byte + result2 error + }{result1, result2} +} + +func (fake *IBPCA) RemoveHomeDir() error { + fake.removeHomeDirMutex.Lock() + ret, specificReturn := fake.removeHomeDirReturnsOnCall[len(fake.removeHomeDirArgsForCall)] + fake.removeHomeDirArgsForCall = append(fake.removeHomeDirArgsForCall, struct { + }{}) + stub := fake.RemoveHomeDirStub + fakeReturns := fake.removeHomeDirReturns + fake.recordInvocation("RemoveHomeDir", []interface{}{}) + fake.removeHomeDirMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + 
return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *IBPCA) RemoveHomeDirCallCount() int { + fake.removeHomeDirMutex.RLock() + defer fake.removeHomeDirMutex.RUnlock() + return len(fake.removeHomeDirArgsForCall) +} + +func (fake *IBPCA) RemoveHomeDirCalls(stub func() error) { + fake.removeHomeDirMutex.Lock() + defer fake.removeHomeDirMutex.Unlock() + fake.RemoveHomeDirStub = stub +} + +func (fake *IBPCA) RemoveHomeDirReturns(result1 error) { + fake.removeHomeDirMutex.Lock() + defer fake.removeHomeDirMutex.Unlock() + fake.RemoveHomeDirStub = nil + fake.removeHomeDirReturns = struct { + result1 error + }{result1} +} + +func (fake *IBPCA) RemoveHomeDirReturnsOnCall(i int, result1 error) { + fake.removeHomeDirMutex.Lock() + defer fake.removeHomeDirMutex.Unlock() + fake.RemoveHomeDirStub = nil + if fake.removeHomeDirReturnsOnCall == nil { + fake.removeHomeDirReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.removeHomeDirReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *IBPCA) SetMountPaths() { + fake.setMountPathsMutex.Lock() + fake.setMountPathsArgsForCall = append(fake.setMountPathsArgsForCall, struct { + }{}) + stub := fake.SetMountPathsStub + fake.recordInvocation("SetMountPaths", []interface{}{}) + fake.setMountPathsMutex.Unlock() + if stub != nil { + fake.SetMountPathsStub() + } +} + +func (fake *IBPCA) SetMountPathsCallCount() int { + fake.setMountPathsMutex.RLock() + defer fake.setMountPathsMutex.RUnlock() + return len(fake.setMountPathsArgsForCall) +} + +func (fake *IBPCA) SetMountPathsCalls(stub func()) { + fake.setMountPathsMutex.Lock() + defer fake.setMountPathsMutex.Unlock() + fake.SetMountPathsStub = stub +} + +func (fake *IBPCA) ViperUnmarshal(arg1 string) (*lib.ServerConfig, error) { + fake.viperUnmarshalMutex.Lock() + ret, specificReturn := fake.viperUnmarshalReturnsOnCall[len(fake.viperUnmarshalArgsForCall)] + fake.viperUnmarshalArgsForCall = append(fake.viperUnmarshalArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.ViperUnmarshalStub + fakeReturns := fake.viperUnmarshalReturns + fake.recordInvocation("ViperUnmarshal", []interface{}{arg1}) + fake.viperUnmarshalMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *IBPCA) ViperUnmarshalCallCount() int { + fake.viperUnmarshalMutex.RLock() + defer fake.viperUnmarshalMutex.RUnlock() + return len(fake.viperUnmarshalArgsForCall) +} + +func (fake *IBPCA) ViperUnmarshalCalls(stub func(string) (*lib.ServerConfig, error)) { + fake.viperUnmarshalMutex.Lock() + defer fake.viperUnmarshalMutex.Unlock() + fake.ViperUnmarshalStub = stub +} + +func (fake *IBPCA) ViperUnmarshalArgsForCall(i int) string { + fake.viperUnmarshalMutex.RLock() + defer fake.viperUnmarshalMutex.RUnlock() + argsForCall := fake.viperUnmarshalArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *IBPCA) ViperUnmarshalReturns(result1 *lib.ServerConfig, result2 error) { + fake.viperUnmarshalMutex.Lock() + defer fake.viperUnmarshalMutex.Unlock() + fake.ViperUnmarshalStub = nil + fake.viperUnmarshalReturns = struct { + result1 *lib.ServerConfig + result2 error + }{result1, result2} +} + +func (fake *IBPCA) ViperUnmarshalReturnsOnCall(i int, result1 *lib.ServerConfig, result2 error) { + fake.viperUnmarshalMutex.Lock() + defer fake.viperUnmarshalMutex.Unlock() + fake.ViperUnmarshalStub = nil + if fake.viperUnmarshalReturnsOnCall == nil { + fake.viperUnmarshalReturnsOnCall 
= make(map[int]struct { + result1 *lib.ServerConfig + result2 error + }) + } + fake.viperUnmarshalReturnsOnCall[i] = struct { + result1 *lib.ServerConfig + result2 error + }{result1, result2} +} + +func (fake *IBPCA) WriteConfig() error { + fake.writeConfigMutex.Lock() + ret, specificReturn := fake.writeConfigReturnsOnCall[len(fake.writeConfigArgsForCall)] + fake.writeConfigArgsForCall = append(fake.writeConfigArgsForCall, struct { + }{}) + stub := fake.WriteConfigStub + fakeReturns := fake.writeConfigReturns + fake.recordInvocation("WriteConfig", []interface{}{}) + fake.writeConfigMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *IBPCA) WriteConfigCallCount() int { + fake.writeConfigMutex.RLock() + defer fake.writeConfigMutex.RUnlock() + return len(fake.writeConfigArgsForCall) +} + +func (fake *IBPCA) WriteConfigCalls(stub func() error) { + fake.writeConfigMutex.Lock() + defer fake.writeConfigMutex.Unlock() + fake.WriteConfigStub = stub +} + +func (fake *IBPCA) WriteConfigReturns(result1 error) { + fake.writeConfigMutex.Lock() + defer fake.writeConfigMutex.Unlock() + fake.WriteConfigStub = nil + fake.writeConfigReturns = struct { + result1 error + }{result1} +} + +func (fake *IBPCA) WriteConfigReturnsOnCall(i int, result1 error) { + fake.writeConfigMutex.Lock() + defer fake.writeConfigMutex.Unlock() + fake.WriteConfigStub = nil + if fake.writeConfigReturnsOnCall == nil { + fake.writeConfigReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.writeConfigReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *IBPCA) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.configToBytesMutex.RLock() + defer fake.configToBytesMutex.RUnlock() + fake.getHomeDirMutex.RLock() + defer fake.getHomeDirMutex.RUnlock() + fake.getServerConfigMutex.RLock() + defer fake.getServerConfigMutex.RUnlock() + fake.getTypeMutex.RLock() + defer fake.getTypeMutex.RUnlock() + fake.initMutex.RLock() + defer fake.initMutex.RUnlock() + fake.isBeingUpdatedMutex.RLock() + defer fake.isBeingUpdatedMutex.RUnlock() + fake.overrideServerConfigMutex.RLock() + defer fake.overrideServerConfigMutex.RUnlock() + fake.parseCABlockMutex.RLock() + defer fake.parseCABlockMutex.RUnlock() + fake.parseCryptoMutex.RLock() + defer fake.parseCryptoMutex.RUnlock() + fake.removeHomeDirMutex.RLock() + defer fake.removeHomeDirMutex.RUnlock() + fake.setMountPathsMutex.RLock() + defer fake.setMountPathsMutex.RUnlock() + fake.viperUnmarshalMutex.RLock() + defer fake.viperUnmarshalMutex.RUnlock() + fake.writeConfigMutex.RLock() + defer fake.writeConfigMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *IBPCA) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ initializer.IBPCA = new(IBPCA) diff --git a/pkg/initializer/ca/sw.go b/pkg/initializer/ca/sw.go new file mode 100644 index 00000000..25467476 --- /dev/null +++ b/pkg/initializer/ca/sw.go @@ -0,0 +1,74 @@ +/* + * Copyright contributors to the 
Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package initializer + +import ( + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/ca/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" +) + +type SW struct{} + +func (sw *SW) Create(instance *current.IBPCA, overrides *v1.ServerConfig, ca IBPCA) (*Response, error) { + var err error + + err = ca.RemoveHomeDir() + if err != nil { + return nil, err + } + + err = ca.OverrideServerConfig(overrides) + if err != nil { + return nil, err + } + + crypto, err := ca.ParseCrypto() + if err != nil { + return nil, err + } + + err = ca.WriteConfig() + if err != nil { + return nil, err + } + + err = ca.Init() + if err != nil { + return nil, err + } + + caBlock, err := ca.ParseCABlock() + if err != nil { + return nil, err + } + crypto = util.JoinMaps(crypto, caBlock) + + ca.SetMountPaths() + + err = ca.RemoveHomeDir() + if err != nil { + return nil, err + } + + return &Response{ + Config: ca.GetServerConfig(), + CryptoMap: crypto, + }, nil +} diff --git a/pkg/initializer/ca/tls/tls.go b/pkg/initializer/ca/tls/tls.go new file mode 100644 index 00000000..360af4a5 --- /dev/null +++ b/pkg/initializer/ca/tls/tls.go @@ -0,0 +1,178 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package tls + +import ( + "crypto/ecdsa" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "math/big" + "net" + "os" + "path/filepath" + "time" + + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + "github.com/cloudflare/cfssl/csr" + "github.com/hyperledger/fabric-ca/api" + cautil "github.com/hyperledger/fabric-ca/util" + "github.com/hyperledger/fabric/bccsp/factory" +) + +type TLS struct { + CAHomeDir string + CSP *factory.FactoryOpts +} + +func (t *TLS) GenerateSelfSignedTLSCrypto(csr *api.CSRInfo) ([]byte, error) { + err := os.RemoveAll(filepath.Join(t.CAHomeDir, "tls")) + if err != nil { + return nil, err + } + + csp, err := cautil.InitBCCSP(&t.CSP, "msp", filepath.Join(t.CAHomeDir, "tls")) + if err != nil { + return nil, err + } + + cr := NewCertificateRequest(csr) + privKey, signer, err := cautil.BCCSPKeyRequestGenerate(cr, csp) + if err != nil { + return nil, err + } + + notBefore := time.Now() + notBefore = notBefore.Add(-5 * time.Minute) // Backdate the validity start slightly + notAfter := notBefore.Add(time.Hour * 24 * 365) // Valid for one year + + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + return nil, err + } + + subject := pkix.Name{ + CommonName: csr.CN, + SerialNumber: csr.SerialNumber, + } + if len(csr.Names) != 0 { + for _, name := range csr.Names { + subject.Country = append(subject.Country, name.C) + subject.Province = append(subject.Province, name.ST) + subject.Locality = append(subject.Locality, name.L) + subject.Organization = append(subject.Organization, name.O) + subject.OrganizationalUnit = append(subject.OrganizationalUnit, name.OU) + } + } + + template := x509.Certificate{ + SerialNumber: serialNumber, + Subject: subject, + NotBefore: notBefore, + NotAfter: notAfter, + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + } + + for _, h := range csr.Hosts { + ip := net.ParseIP(h) + if ip != nil { + template.IPAddresses = append(template.IPAddresses, ip) + } else { + template.DNSNames = append(template.DNSNames, h) + } + } + + pubKey, err := privKey.PublicKey() + if err != nil { + return nil, err + } + pubKeyBytes, err := pubKey.Bytes() + if err != nil { + return nil, err + } + pub, err := x509.ParsePKIXPublicKey(pubKeyBytes) + if err != nil { + return nil, err + } + ecdsaPubKey := pub.(*ecdsa.PublicKey) + + certBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, ecdsaPubKey, signer) + if err != nil { + return nil, err + } + + return certBytes, nil +} + +func (t *TLS) WriteCryptoToFile(cert []byte, certName string) error { + certPath := filepath.Join(t.CAHomeDir, "tls", certName) + err := util.EnsureDir(filepath.Dir(certPath)) + if err != nil { + return err + } + + certOut, err := os.Create(filepath.Clean(certPath)) + if err != nil { + return err + } + err = pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: cert}) + if err != nil { + return err + } + err = certOut.Close() + if err != nil { + return err + } + + return nil +} + +func NewCertificateRequest(req *api.CSRInfo) *csr.CertificateRequest { + cr := csr.CertificateRequest{} + if req != nil && req.Names != nil { + cr.Names = req.Names + } + if req != nil && req.Hosts != nil { + cr.Hosts = req.Hosts + } else { + // Default requested hosts are local hostname + hostname, err := os.Hostname() + if err == nil && hostname != "" { + cr.Hosts = make([]string, 1) + cr.Hosts[0] = hostname + } + }
+ if req != nil && req.KeyRequest != nil { + cr.KeyRequest = newCfsslKeyRequest(req.KeyRequest) + } + if req != nil { + cr.CA = req.CA + cr.SerialNumber = req.SerialNumber + } + return &cr +} + +func newCfsslKeyRequest(bkr *api.KeyRequest) *csr.KeyRequest { + return &csr.KeyRequest{A: bkr.Algo, S: bkr.Size} +} diff --git a/pkg/initializer/ca/tls/tls_suite_test.go b/pkg/initializer/ca/tls/tls_suite_test.go new file mode 100644 index 00000000..c7b8648d --- /dev/null +++ b/pkg/initializer/ca/tls/tls_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package tls_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestTls(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Tls Suite") +} diff --git a/pkg/initializer/ca/tls/tls_test.go b/pkg/initializer/ca/tls/tls_test.go new file mode 100644 index 00000000..5854548b --- /dev/null +++ b/pkg/initializer/ca/tls/tls_test.go @@ -0,0 +1,112 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package tls_test + +import ( + "crypto/x509" + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca/tls" + cfcsr "github.com/cloudflare/cfssl/csr" + "github.com/hyperledger/fabric-ca/api" + "github.com/hyperledger/fabric/bccsp/factory" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("generating TLS crypto", func() { + var ( + tlsGen *tls.TLS + csr *api.CSRInfo + ) + + BeforeEach(func() { + csp := &factory.FactoryOpts{ + ProviderName: "SW", + } + tlsGen = &tls.TLS{ + CAHomeDir: "crypto", + CSP: csp, + } + + csr = &api.CSRInfo{ + CN: "tls-ca", + Names: []cfcsr.Name{ + cfcsr.Name{ + C: "United States", + ST: "North Carolina", + L: "Raleigh", + O: "IBM", + OU: "Blockchain", + }, + }, + Hosts: []string{"localhost", "127.0.0.1"}, + } + }) + + AfterEach(func() { + err := os.RemoveAll("crypto") + Expect(err).NotTo(HaveOccurred()) + }) + + It("generates key and self-signed TLS certificate", func() { + certBytes, err := tlsGen.GenerateSelfSignedTLSCrypto(csr) + Expect(err).NotTo(HaveOccurred()) + + By("returning a properly populated certificate", func() { + cert, err := x509.ParseCertificate(certBytes) + Expect(err).NotTo(HaveOccurred()) + + Expect(cert.Subject.Country).To(Equal([]string{"United States"})) + Expect(cert.Subject.Province).To(Equal([]string{"North Carolina"})) + Expect(cert.Subject.Locality).To(Equal([]string{"Raleigh"})) + Expect(cert.Subject.Organization).To(Equal([]string{"IBM"})) + Expect(cert.Subject.OrganizationalUnit).To(Equal([]string{"Blockchain"})) + + Expect(cert.DNSNames[0]).To(Equal("localhost")) + Expect(fmt.Sprintf("%s", cert.IPAddresses[0])).To(Equal("127.0.0.1")) + + Expect(cert.Subject).To(Equal(cert.Issuer)) + }) + + By("writing the private key to proper location", func() { + keystorePath := filepath.Join(tlsGen.CAHomeDir, "tls/msp/keystore") + Expect(keystorePath).Should(BeADirectory()) + + files, err := ioutil.ReadDir(keystorePath) + Expect(err).NotTo(HaveOccurred()) + Expect(len(files)).NotTo(Equal(0)) + Expect(files[0].Name()).To(ContainSubstring("sk")) + }) + }) + + It("stores the certificate in the proper directory", func() { + certBytes, err := tlsGen.GenerateSelfSignedTLSCrypto(csr) + Expect(err).NotTo(HaveOccurred()) + + err = tlsGen.WriteCryptoToFile(certBytes, "tls-cert.pem") + Expect(err).NotTo(HaveOccurred()) + + certPath := filepath.Join(tlsGen.CAHomeDir, "tls", "tls-cert.pem") + Expect(certPath).Should(BeAnExistingFile()) + }) +}) diff --git a/pkg/initializer/common/common.go b/pkg/initializer/common/common.go new file mode 100644 index 00000000..e3a482bc --- /dev/null +++ b/pkg/initializer/common/common.go @@ -0,0 +1,253 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package common + +import ( + "bytes" + "context" + "fmt" + "path/filepath" + + "github.com/pkg/errors" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/enroller" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/mspparser" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/validator" + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var log = logf.Log.WithName("initializer") + +type SecretType string + +var ( + ECERT SecretType = "ecert" + TLS SecretType = "tls" +) + +type Instance interface { + metav1.Object + runtime.Object + EnrollerImage() string + GetPullSecrets() []corev1.LocalObjectReference + IsHSMEnabled() bool + UsingHSMProxy() bool + GetConfigOverride() (interface{}, error) +} + +// NOTE: Modifies cryptos object passed as param +func GetCommonEnrollers(cryptos *config.Cryptos, enrollmentSpec *current.EnrollmentSpec, storagePath string) error { + if enrollmentSpec.TLS != nil && cryptos.TLS == nil { + bytes, err := enrollmentSpec.TLS.GetCATLSBytes() + if err != nil { + return err + } + + caClient := enroller.NewFabCAClient( + enrollmentSpec.TLS, + filepath.Join(storagePath, "tls"), + nil, + bytes, + ) + cryptos.TLS = enroller.New(enroller.NewSWEnroller(caClient)) + } + + if enrollmentSpec.ClientAuth != nil && cryptos.ClientAuth == nil { + bytes, err := enrollmentSpec.ClientAuth.GetCATLSBytes() + if err != nil { + return err + } + + caClient := enroller.NewFabCAClient( + enrollmentSpec.ClientAuth, + filepath.Join(storagePath, "clientauth"), + nil, + bytes, + ) + cryptos.ClientAuth = enroller.New(enroller.NewSWEnroller(caClient)) + } + + return nil +} + +// NOTE: Modifies cryptos object passed as param +func GetMSPCrypto(cryptos *config.Cryptos, mspSpec *current.MSPSpec) error { + if mspSpec != nil { + if mspSpec.Component != nil { + cryptos.Enrollment = mspparser.New(mspSpec.Component) + } + + if mspSpec.TLS != nil { + cryptos.TLS = mspparser.New(mspSpec.TLS) + } + + if mspSpec.ClientAuth != nil { + cryptos.ClientAuth = mspparser.New(mspSpec.ClientAuth) + } + } + + return nil +} + +//go:generate counterfeiter -o mocks/cryptovalidator.go -fake-name CryptoValidator . 
CryptoValidator +type CryptoValidator interface { + CheckEcertCrypto(v1.Object, string) error + CheckTLSCrypto(v1.Object, string) error + CheckClientAuthCrypto(v1.Object, string) error + SetHSMEnabled(bool) +} + +func CheckCrypto(cryptoValidator CryptoValidator, instance v1.Object, checkClientAuth bool) error { + name := instance.GetName() + + err := cryptoValidator.CheckEcertCrypto(instance, name) + if err != nil { + if validator.CheckError(err) { + return errors.Wrap(err, "missing ecert crypto") + } + } + + err = cryptoValidator.CheckTLSCrypto(instance, name) + if err != nil { + if validator.CheckError(err) { + log.Info(fmt.Sprintf("missing TLS crypto: %s", err.Error())) + return errors.Wrap(err, "missing TLS crypto") + } + } + + if checkClientAuth { + err := cryptoValidator.CheckClientAuthCrypto(instance, name) + if validator.CheckError(err) { + log.Info(fmt.Sprintf("missing Client Auth crypto: %s", err.Error())) + return errors.Wrap(err, "missing Client Auth crypto") + } + } + + return nil +} + +func GetAdminCertsFromSecret(client k8sclient.Client, instance v1.Object) map[string][]byte { + prefix := "ecert-" + instance.GetName() + namespacedName := types.NamespacedName{ + Name: prefix + "-admincerts", + Namespace: instance.GetNamespace(), + } + + certs := &corev1.Secret{} + err := client.Get(context.TODO(), namespacedName, certs) + if err != nil { + if !k8serrors.IsNotFound(err) { + return nil + } + + return map[string][]byte{} + } + + return certs.Data +} + +func GetAdminCertsFromSpec(spec *current.SecretSpec) []string { + adminCerts := []string{} + if spec != nil { + if spec.MSP != nil { + if spec.MSP.Component != nil { + adminCerts = append(adminCerts, spec.MSP.Component.AdminCerts...) + } + } else if spec.Enrollment != nil { + if spec.Enrollment.Component != nil { + adminCerts = append(adminCerts, spec.Enrollment.Component.AdminCerts...) + } + } + } + + return adminCerts +} + +// CheckIfCertsDifferent compares the current certificates against the updated list. Order of certificates
+// in the lists is ignored; it returns true only when the updated list is non-empty and differs from the current certificates. +func CheckIfCertsDifferent(current map[string][]byte, updated []string) (bool, error) { + // Only detect a difference if the list of updated certificates is not empty + if len(current) != len(updated) && len(updated) > 0 { + return true, nil + } + + for _, newCert := range updated { + certFound := false + newCertBytes, err := util.Base64ToBytes(newCert) + if err != nil { + return false, err + } + + for _, certBytes := range current { + if bytes.Equal(certBytes, newCertBytes) { + certFound = true + break + } + } + + if !certFound { + return true, nil + } + } + + return false, nil +} + +func ConvertCertsToBytes(certs []string) ([][]byte, error) { + certBytes := [][]byte{} + for _, cert := range certs { + bytes, err := util.Base64ToBytes(cert) + if err != nil { + return nil, err + } + certBytes = append(certBytes, bytes) + } + return certBytes, nil +} + +func GetConfigFromConfigMap(client k8sclient.Client, instance v1.Object) (*corev1.ConfigMap, error) { + name := fmt.Sprintf("%s-config", instance.GetName()) + log.Info(fmt.Sprintf("Get config map '%s'...", name)) + + cm := &corev1.ConfigMap{} + n := types.NamespacedName{ + Name: name, + Namespace: instance.GetNamespace(), + } + + err := client.Get(context.TODO(), n, cm) + if err != nil { + return nil, err + } + + return cm, nil + +} diff --git a/pkg/initializer/common/common_suite_test.go b/pkg/initializer/common/common_suite_test.go new file mode 100644 index 00000000..9dbd6afc --- /dev/null +++ b/pkg/initializer/common/common_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package common_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestCommon(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Common Suite") +} diff --git a/pkg/initializer/common/common_test.go b/pkg/initializer/common/common_test.go new file mode 100644 index 00000000..d91c973c --- /dev/null +++ b/pkg/initializer/common/common_test.go @@ -0,0 +1,127 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package common_test + +import ( + "encoding/base64" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/mocks" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/pkg/errors" +) + +var _ = Describe("Common", func() { + var ( + mockValidator *mocks.CryptoValidator + instance *current.IBPPeer + ) + + BeforeEach(func() { + mockValidator = &mocks.CryptoValidator{} + + instance = ¤t.IBPPeer{} + instance.Name = "instance1" + }) + + Context("check crypto", func() { + It("returns true, if missing a ecert crypto", func() { + mockValidator.CheckEcertCryptoReturns(errors.New("not found")) + err := common.CheckCrypto(mockValidator, instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("missing ecert crypto")) + }) + + It("returns true, if missing a tls crypto", func() { + mockValidator.CheckTLSCryptoReturns(errors.New("not found")) + err := common.CheckCrypto(mockValidator, instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("missing TLS crypto")) + }) + + It("returns true, if missing a tls crypto", func() { + mockValidator.CheckClientAuthCryptoReturns(errors.New("not found")) + err := common.CheckCrypto(mockValidator, instance, true) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("missing Client Auth crypto")) + }) + + It("returns false, if all crypto found and is in proper format", func() { + err := common.CheckCrypto(mockValidator, instance, true) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("check if certificates are different", func() { + var ( + currentCerts map[string][]byte + base64cert string + base64cert2 string + ) + + BeforeEach(func() { + base64cert = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNvVENDQWtlZ0F3SUJBZ0lVTUwrYW4vS2QwRllaazhLTDRRMUQ2eHVJK08wd0NnWUlLb1pJemowRUF3SXcKV2pFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVFzd0NRWURWUVFERXdKallUQWVGdzB5Ck1EQTJNVGd5TVRRNU1EQmFGdzB5TVRBMk1UZ3lNVFUwTURCYU1HRXhDekFKQmdOVkJBWVRBbFZUTVJjd0ZRWUQKVlFRSUV3NU9iM0owYUNCRFlYSnZiR2x1WVRFVU1CSUdBMVVFQ2hNTFNIbHdaWEpzWldSblpYSXhEakFNQmdOVgpCQXNUQldGa2JXbHVNUk13RVFZRFZRUURFd3B3WldWeUxXRmtiV2x1TUZrd0V3WUhLb1pJemowQ0FRWUlLb1pJCnpqMERBUWNEUWdBRVRDOXVtbDExU240UVlDQklPWnlUdGxXVHhFTy90R1Q0cGFNMXVYcXF0dlhkMWVSR1RSMVcKL0x2M0Y3K1k3M1cxZ0VqeEp0UkZaY0oxN3pOZUVHc2lYYU9CNHpDQjREQU9CZ05WSFE4QkFmOEVCQU1DQjRBdwpEQVlEVlIwVEFRSC9CQUl3QURBZEJnTlZIUTRFRmdRVVNsbVJ4a2JJMzNteHNLaEVtY1R6eVZYeHNkOHdId1lEClZSMGpCQmd3Rm9BVStKWU5rWFgyb0VUREdVbHl2OEdHcDk3YUM4RXdJZ1lEVlIwUkJCc3dHWUlYVTJGaFpITXQKVFdGalFtOXZheTFRY204dWJHOWpZV3d3WEFZSUtnTUVCUVlIQ0FFRVVIc2lZWFIwY25NaU9uc2lhR1l1UVdabQphV3hwWVhScGIyNGlPaUlpTENKb1ppNUZibkp2Ykd4dFpXNTBTVVFpT2lKd1pXVnlMV0ZrYldsdUlpd2lhR1l1ClZIbHdaU0k2SW1Ga2JXbHVJbjE5TUFvR0NDcUdTTTQ5QkFNQ0EwZ0FNRVVDSVFDZWRLazZPcVczR3JmdDZQWksKUHZwWUdla1c4NzdsUmgvOUtERHNWdlJKYlFJZ01aanRja2dBL2RTN0VjUXJ5VHl2cHB0TTdKWWJoZGRrZDdTcgp5TXl0b3c0PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==" + base64cert2 = 
"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNyRENDQWxPZ0F3SUJBZ0lVRUEwRGE1Ym5Eb1JzbWZLWGE4d0U5NkxNdTJBd0NnWUlLb1pJemowRUF3SXcKYURFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJrd0Z3WURWUVFERXhCbVlXSnlhV010ClkyRXRjMlZ5ZG1WeU1CNFhEVEl3TURZeE9ESXdORGt3TUZvWERUSXhNRFl4T0RJd05UUXdNRm93WURFTE1Ba0cKQTFVRUJoTUNWVk14RnpBVkJnTlZCQWdURGs1dmNuUm9JRU5oY205c2FXNWhNUlF3RWdZRFZRUUtFd3RJZVhCbApjbXhsWkdkbGNqRU9NQXdHQTFVRUN4TUZZV1J0YVc0eEVqQVFCZ05WQkFNVENYQmxaWEpoWkcxcGJqQlpNQk1HCkJ5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEEwSUFCT0MwRG1vMm5MUUd4YzJRcnMyTlRUZ3hOdy9MTVluRWFheVQKQ0RKNldFVmlod2VPQ01WeTZ6MkVLVG81MHZsSm40aGd0VXhYR2xzb1AvN1YxZHdyMi9pamdlSXdnZDh3RGdZRApWUjBQQVFIL0JBUURBZ2VBTUF3R0ExVWRFd0VCL3dRQ01BQXdIUVlEVlIwT0JCWUVGRWVlSEZTUjladmMyeUxZCkZ3T1pkV0Iva0ozdU1COEdBMVVkSXdRWU1CYUFGSWd0eTR2U0VUZllCeDBTS1BPdExQQmZ0YTVxTUNJR0ExVWQKRVFRYk1CbUNGMU5oWVdSekxVMWhZMEp2YjJzdFVISnZMbXh2WTJGc01Gc0dDQ29EQkFVR0J3Z0JCRTk3SW1GMApkSEp6SWpwN0ltaG1Ma0ZtWm1sc2FXRjBhVzl1SWpvaUlpd2lhR1l1Ulc1eWIyeHNiV1Z1ZEVsRUlqb2ljR1ZsCmNtRmtiV2x1SWl3aWFHWXVWSGx3WlNJNkltRmtiV2x1SW4xOU1Bb0dDQ3FHU000OUJBTUNBMGNBTUVRQ0lGTFoKNnBCMWpDaWZIejRVTlZqd0p3RjlKUWZ2UCsxbFpJN0JydjFYdi9nUkFpQk0yMVg4N1N1V2tWaEdGRUpPOElnMQptMU9SNkZKSzBMUEN4SkU3bnlMdTRRPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=" + + certBytes, err := base64.StdEncoding.DecodeString(base64cert) + Expect(err).NotTo(HaveOccurred()) + + cert2Bytes, err := base64.StdEncoding.DecodeString(base64cert2) + Expect(err).NotTo(HaveOccurred()) + + currentCerts = map[string][]byte{ + "cert1.pem": certBytes, + "cert2.pem": cert2Bytes, + } + }) + + It("returns false if list of certificates is equal", func() { + newCerts := []string{base64cert2, base64cert} + updated, err := common.CheckIfCertsDifferent(currentCerts, newCerts) + Expect(err).NotTo(HaveOccurred()) + Expect(updated).To(Equal(false)) + }) + + It("returns true if list of certificates is not equal", func() { + base64cert = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNyRENDQWxPZ0F3SUJBZ0lVVlM0WXQ3aFRUYnZFVWk4S1R0QWpEU0pHUG5jd0NnWUlLb1pJemowRUF3SXcKYURFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJrd0Z3WURWUVFERXhCbVlXSnlhV010ClkyRXRjMlZ5ZG1WeU1CNFhEVEl3TURZeE9ESXdOREV3TUZvWERUSXhNRFl4T0RJd05EWXdNRm93WURFTE1Ba0cKQTFVRUJoTUNWVk14RnpBVkJnTlZCQWdURGs1dmNuUm9JRU5oY205c2FXNWhNUlF3RWdZRFZRUUtFd3RJZVhCbApjbXhsWkdkbGNqRU9NQXdHQTFVRUN4TUZZV1J0YVc0eEVqQVFCZ05WQkFNVENYQmxaWEpoWkcxcGJqQlpNQk1HCkJ5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEEwSUFCSHBSRjJKRkhLZnVxNUR0bHArZDJGak0rZytacWJCY0FGN3QKQVpTM2VBL2JzRTNIcllLUWRaelNGSzhNUStGQnF5cFYrdEpDaldWMktZRFRvTGJvTk5DamdlSXdnZDh3RGdZRApWUjBQQVFIL0JBUURBZ2VBTUF3R0ExVWRFd0VCL3dRQ01BQXdIUVlEVlIwT0JCWUVGRWRRRHQwMDJSWGpwcXdnCmFjMTJuK3FlVHdTN01COEdBMVVkSXdRWU1CYUFGSWd0eTR2U0VUZllCeDBTS1BPdExQQmZ0YTVxTUNJR0ExVWQKRVFRYk1CbUNGMU5oWVdSekxVMWhZMEp2YjJzdFVISnZMbXh2WTJGc01Gc0dDQ29EQkFVR0J3Z0JCRTk3SW1GMApkSEp6SWpwN0ltaG1Ma0ZtWm1sc2FXRjBhVzl1SWpvaUlpd2lhR1l1Ulc1eWIyeHNiV1Z1ZEVsRUlqb2ljR1ZsCmNtRmtiV2x1SWl3aWFHWXVWSGx3WlNJNkltRmtiV2x1SW4xOU1Bb0dDQ3FHU000OUJBTUNBMGNBTUVRQ0lGZEQKODVFY2ErcTFralRmTGNLZlZhalVBb2I2OGtwUzUrM0ZraitsdUo1MUFpQTluZmRiZnMxYUpEV2VpUTdFOFdqLwpLOXgxRHUzY051Nno3Ym9leldlM1FRPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=" + + newCerts := []string{base64cert2, base64cert} + updated, err := common.CheckIfCertsDifferent(currentCerts, newCerts) + Expect(err).NotTo(HaveOccurred()) + Expect(updated).To(Equal(true)) + }) + + It("return true if list of certificates are different 
lengths", func() { + newCerts := []string{base64cert} + updated, err := common.CheckIfCertsDifferent(currentCerts, newCerts) + Expect(err).NotTo(HaveOccurred()) + Expect(updated).To(Equal(true)) + }) + + It("returns false if list of updated certificates is empty", func() { + newCerts := []string{} + updated, err := common.CheckIfCertsDifferent(currentCerts, newCerts) + Expect(err).NotTo(HaveOccurred()) + Expect(updated).To(Equal(false)) + }) + }) + +}) diff --git a/pkg/initializer/common/config/config_suite_test.go b/pkg/initializer/common/config/config_suite_test.go new file mode 100644 index 00000000..eff0b39f --- /dev/null +++ b/pkg/initializer/common/config/config_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package config_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestConfig(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Config Suite") +} diff --git a/pkg/initializer/common/config/config_test.go b/pkg/initializer/common/config/config_test.go new file mode 100644 index 00000000..ef9a1a89 --- /dev/null +++ b/pkg/initializer/common/config/config_test.go @@ -0,0 +1,162 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package config_test + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "math/big" + + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + common "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config/mocks" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/pkg/errors" +) + +var _ = Describe("Peer configuration", func() { + Context("verify cert OU", func() { + var ( + resp *common.Response + certtemplate *x509.Certificate + ) + + BeforeEach(func() { + certtemplate = &x509.Certificate{ + SerialNumber: big.NewInt(1), + Subject: pkix.Name{ + OrganizationalUnit: []string{"peer", "orderer", "admin"}, + }, + } + certBytes := createCertBytes(certtemplate) + + resp = &common.Response{ + SignCert: certBytes, + AdminCerts: [][]byte{certBytes}, + } + }) + + It("returns error if peer signcert doesn't have OU type 'peer'", func() { + certtemplate.Subject.OrganizationalUnit = []string{"invalidou"} + certbytes := createCertBytes(certtemplate) + resp.SignCert = certbytes + + err := resp.VerifyCertOU("peer") + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("invalid OU for signcert: cert does not have right OU, expecting 'peer'")) + }) + + It("return error if sign cert has no OU defined", func() { + certtemplate.Subject.OrganizationalUnit = nil + certbytes := createCertBytes(certtemplate) + resp.SignCert = certbytes + + err := resp.VerifyCertOU("peer") + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("invalid OU for signcert: OU not defined")) + }) + + It("verifies that peer signcert has correct OU", func() { + err := resp.VerifyCertOU("peer") + Expect(err).NotTo(HaveOccurred()) + }) + + It("verifies that orderer signcert and admincerts have correct OU", func() { + err := resp.VerifyCertOU("orderer") + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("generate crypto response", func() { + var ( + cryptos *config.Cryptos + enrollmentCrypto *mocks.Crypto + tlsCrypto *mocks.Crypto + clientAuthCrypto *mocks.Crypto + ) + + BeforeEach(func() { + enrollmentCrypto = &mocks.Crypto{} + tlsCrypto = &mocks.Crypto{} + clientAuthCrypto = &mocks.Crypto{} + + cryptos = &config.Cryptos{ + Enrollment: enrollmentCrypto, + TLS: tlsCrypto, + ClientAuth: clientAuthCrypto, + } + }) + + Context("enrollment", func() { + It("returns an error on failure", func() { + msg := "could not enrollment get crypto" + enrollmentCrypto.GetCryptoReturns(nil, errors.New(msg)) + _, err := cryptos.GenerateCryptoResponse() + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(msg)) + }) + }) + + Context("tls", func() { + It("returns an error on failure", func() { + msg := "could not tls get crypto" + tlsCrypto.GetCryptoReturns(nil, errors.New(msg)) + _, err := cryptos.GenerateCryptoResponse() + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(msg)) + }) + }) + + Context("client auth", func() { + It("returns an error on failure", func() { + msg := "could not client auth get crypto" + clientAuthCrypto.GetCryptoReturns(nil, errors.New(msg)) + _, err := cryptos.GenerateCryptoResponse() + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(msg)) + }) + }) + + It("gets crypto", func() { + resp, err := cryptos.GenerateCryptoResponse() + Expect(err).NotTo(HaveOccurred()) + Expect(resp).NotTo(BeNil()) + }) + }) +}) + +func createCertBytes(certTemplate *x509.Certificate) []byte { + priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + Expect(err).NotTo(HaveOccurred()) + + cert, err := x509.CreateCertificate(rand.Reader, certTemplate, certTemplate, &priv.PublicKey, priv) + Expect(err).NotTo(HaveOccurred()) + + block := &pem.Block{ + Type: "CERTIFICATE", + Bytes: cert, + } + + return pem.EncodeToMemory(block) +} diff --git a/pkg/initializer/common/config/crypto.go 
b/pkg/initializer/common/config/crypto.go new file mode 100644 index 00000000..1ffbc716 --- /dev/null +++ b/pkg/initializer/common/config/crypto.go @@ -0,0 +1,149 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package config + +import ( + "fmt" + "strings" + + "github.com/pkg/errors" + + "github.com/IBM-Blockchain/fabric-operator/pkg/util" +) + +//go:generate counterfeiter -o mocks/crypto.go -fake-name Crypto . Crypto + +type Crypto interface { + GetCrypto() (*Response, error) + PingCA() error + Validate() error +} + +// TODO: Next refactor should move this outside of config package into cryptogen package +// along with the Response struct, which is required to avoid cyclical dependencies +func GenerateCrypto(generator Crypto) (*Response, error) { + if err := generator.PingCA(); err != nil { + return nil, errors.Wrap(err, "ca is not reachable") + } + + if err := generator.Validate(); err != nil { + return nil, errors.Wrap(err, "invalid crypto") + } + + return generator.GetCrypto() +} + +type Cryptos struct { + Enrollment Crypto + TLS Crypto + ClientAuth Crypto +} + +func (c *Cryptos) GenerateCryptoResponse() (*CryptoResponse, error) { + response := &CryptoResponse{} + + if c.Enrollment != nil { + resp, err := GenerateCrypto(c.Enrollment) + if err != nil { + return nil, err + } + + response.Enrollment = resp + } + + if c.TLS != nil { + resp, err := GenerateCrypto(c.TLS) + if err != nil { + return nil, err + } + + response.TLS = resp + } + + if c.ClientAuth != nil { + resp, err := GenerateCrypto(c.ClientAuth) + if err != nil { + return nil, err + } + + response.ClientAuth = resp + } + + return response, nil +} + +type CryptoResponse struct { + Enrollment *Response + TLS *Response + ClientAuth *Response +} + +func (c *CryptoResponse) VerifyCertOU(crType string) error { + if c.Enrollment != nil { + err := c.Enrollment.VerifyCertOU(crType) + if err != nil { + return errors.Wrapf(err, "invalid OU for %s identity", crType) + } + } + return nil +} + +type Response struct { + CACerts [][]byte + IntermediateCerts [][]byte + AdminCerts [][]byte + SignCert []byte + Keystore []byte +} + +func (r *Response) VerifyCertOU(crType string) error { + if r.SignCert == nil || len(r.SignCert) == 0 { + return nil + } + + crType = strings.ToLower(crType) + + err := verifyCertOU(r.SignCert, crType) + if err != nil { + return errors.Wrap(err, "invalid OU for signcert") + } + + if r.AdminCerts == nil { + return nil + } + + return nil +} + +func verifyCertOU(pemBytes []byte, ou string) error { + cert, err := util.GetCertificateFromPEMBytes(pemBytes) + if err != nil { + return err + } + + if cert.Subject.OrganizationalUnit == nil { + return errors.New("OU not defined") + } + + if !util.FindStringInArray(ou, cert.Subject.OrganizationalUnit) { + return errors.New(fmt.Sprintf("cert does not have right OU, expecting '%s'", ou)) + } + + return nil +} diff --git 
a/pkg/initializer/common/config/hsmconfig.go b/pkg/initializer/common/config/hsmconfig.go new file mode 100644 index 00000000..8249a015 --- /dev/null +++ b/pkg/initializer/common/config/hsmconfig.go @@ -0,0 +1,197 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package config + +import ( + "context" + + "github.com/pkg/errors" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/yaml" +) + +var log = logf.Log.WithName("config") + +// Client defines the contract to get resources from clusters +type Client interface { + Get(ctx context.Context, key client.ObjectKey, obj client.Object) error +} + +// ReadHSMConfig reads hsm configuration from 'ibp-hsm-config', and key 'ibp-hsm-config.yaml' +// from data +func ReadHSMConfig(client Client, instance metav1.Object) (*HSMConfig, error) { + // NOTE: This is hard-coded because this name should never be different + name := "ibp-hsm-config" + + cm := &corev1.ConfigMap{} + err := client.Get( + context.TODO(), + types.NamespacedName{ + Name: name, + Namespace: instance.GetNamespace(), + }, + cm, + ) + if err != nil { + return nil, errors.Wrap(err, "failed to get hsm config 'ibp-hsm-config'") + } + + hsmConfig := &HSMConfig{} + err = yaml.Unmarshal([]byte(cm.Data["ibp-hsm-config.yaml"]), hsmConfig) + if err != nil { + return nil, err + } + + return hsmConfig, nil +} + +// HSMConfig defines the configuration parameters for HSMs +type HSMConfig struct { + Type string `json:"type,omitempty"` + Version string `json:"version,omitempty"` + Library Library `json:"library"` + MountPaths []MountPath `json:"mountpaths"` + Envs []corev1.EnvVar `json:"envs,omitempty"` + Daemon *Daemon `json:"daemon,omitempty"` +} + +// Library represents the configuration for an HSM library +type Library struct { + FilePath string `json:"filepath"` + Image string `json:"image"` + AutoUpdateDisabled bool `json:"autoUpdateDisabled,omitempty"` + Auth *Auth `json:"auth,omitempty"` +} + +// BuildPullSecret builds the string secret into the type expected by kubernetes +func (h *HSMConfig) BuildPullSecret() corev1.LocalObjectReference { + if h.Library.Auth != nil { + return h.Library.Auth.BuildPullSecret() + } + return corev1.LocalObjectReference{} +} + +// GetVolumes builds the volume spec into the type expected by kubernetes, by default +// the volume source is empty dir with memory as the storage medium +func (h *HSMConfig) GetVolumes() []corev1.Volume { + volumes := []corev1.Volume{} + for _, mount := range h.MountPaths { + // Skip building volume if using PVC, the PVC is known to the caller of method. + // The caller will build the proper PVC volume by setting the appropriate claim name. 
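+		// As an illustration, mirroring the fixture in hsmconfig_test.go below, the
+		// 'ibp-hsm-config.yaml' entries this loop consumes might look like (values
+		// are examples only):
+		//
+		//   mountpaths:
+		//     - name: hsmcrypto
+		//       secret: hsmcrypto
+		//       mountpath: /hsm
+		//       paths:
+		//         - key: cert.pem
+		//           path: cert.pem
+		//         - key: key.pem
+		//           path: key.pem
+		//     - name: hsmconfig
+		//       secret: hsmcrypto
+		//       mountpath: /etc/Chrystoki.conf
+		//       subpath: Chrystoki.conf
+		//
+		// Entries that set usePVC are skipped here; the caller mounts the PVC itself.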
+		if !mount.UsePVC {
+			volumes = append(volumes, mount.BuildVolume())
+		}
+	}
+	return volumes
+}
+
+// GetVolumeMounts builds the volume mount spec into the type expected by kubernetes
+func (h *HSMConfig) GetVolumeMounts() []corev1.VolumeMount {
+	volumeMounts := []corev1.VolumeMount{}
+	for _, mount := range h.MountPaths {
+		// Skip building volume mount if using PVC, the PVC is known to the caller of method.
+		// The caller will build the proper PVC volume mount with the mount path specified
+		// in the HSM config
+		if !mount.UsePVC {
+			volumeMounts = append(volumeMounts, mount.BuildVolumeMount())
+		}
+	}
+	return volumeMounts
+}
+
+// GetEnvs builds the env var spec into the type expected by kubernetes
+func (h *HSMConfig) GetEnvs() []corev1.EnvVar {
+	return h.Envs
+}
+
+// Auth represents the authentication methods that are supported
+type Auth struct {
+	ImagePullSecret string `json:"imagePullSecret,omitempty"`
+	// UserID string `json:"userid,omitempty"`
+	// Password string `json:"password,omitempty"`
+}
+
+// BuildPullSecret builds the pull secret string into the type expected by kubernetes
+func (a *Auth) BuildPullSecret() corev1.LocalObjectReference {
+	return corev1.LocalObjectReference{Name: a.ImagePullSecret}
+}
+
+// MountPath represents the configuration of volume mounts on a container
+type MountPath struct {
+	Name         string               `json:"name"`
+	Secret       string               `json:"secret"`
+	MountPath    string               `json:"mountpath"`
+	UsePVC       bool                 `json:"usePVC"`
+	SubPath      string               `json:"subpath,omitempty"`
+	Paths        []Path               `json:"paths,omitempty"`
+	VolumeSource *corev1.VolumeSource `json:"volumeSource,omitempty"`
+}
+
+type Path struct {
+	Key  string `json:"key"`
+	Path string `json:"path"`
+}
+
+// BuildVolumeMount builds the volume mount spec into the type expected by kubernetes
+func (m *MountPath) BuildVolumeMount() corev1.VolumeMount {
+	return corev1.VolumeMount{
+		Name:      m.Name,
+		MountPath: m.MountPath,
+		SubPath:   m.SubPath,
+	}
+}
+
+// BuildVolume builds the volume spec into the type expected by kubernetes
+func (m *MountPath) BuildVolume() corev1.Volume {
+	// In our initial HSM implementation, we made secrets the default volume source and only
+	// allowed secrets-based volumes. With the introduction of other HSM implementations (opencryptoki),
+	// other volume types had to be introduced and are now directly allowed in the configuration.
+	// However, to not break current users using an older config, this logic needs to persist until
+	// we can deprecate older configs.
+	if m.VolumeSource == nil {
+		m.VolumeSource = &corev1.VolumeSource{
+			Secret: &corev1.SecretVolumeSource{
+				SecretName: m.Secret,
+			},
+		}
+	}
+
+	// Setting key/path is only supported for secrets; this is due to the fact
+	// that we made secrets the default volume source and only allowed secrets-based volumes.
+	// For other volume source types, they should be configured directly in the hsm config.
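+	// For example, the "hsmcrypto" mount defined in hsmconfig_test.go (secret
+	// "hsmcrypto" with cert.pem and key.pem paths) resolves to a volume equivalent to:
+	//
+	//   corev1.Volume{
+	//       Name: "hsmcrypto",
+	//       VolumeSource: corev1.VolumeSource{
+	//           Secret: &corev1.SecretVolumeSource{
+	//               SecretName: "hsmcrypto",
+	//               Items: []corev1.KeyToPath{
+	//                   {Key: "cert.pem", Path: "cert.pem"},
+	//                   {Key: "key.pem", Path: "key.pem"},
+	//               },
+	//           },
+	//       },
+	//   }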
+ for _, path := range m.Paths { + m.VolumeSource.Secret.Items = append(m.VolumeSource.Secret.Items, + corev1.KeyToPath{ + Key: path.Key, + Path: path.Path, + }, + ) + } + + return corev1.Volume{ + Name: m.Name, + VolumeSource: *m.VolumeSource, + } +} diff --git a/pkg/initializer/common/config/hsmconfig_test.go b/pkg/initializer/common/config/hsmconfig_test.go new file mode 100644 index 00000000..a1f0ae43 --- /dev/null +++ b/pkg/initializer/common/config/hsmconfig_test.go @@ -0,0 +1,145 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package config_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + corev1 "k8s.io/api/core/v1" +) + +var _ = Describe("HSM Config", func() { + var hsmConfig *config.HSMConfig + + BeforeEach(func() { + hsmConfig = &config.HSMConfig{ + Type: "hsm", + Version: "v1", + MountPaths: []config.MountPath{ + config.MountPath{ + Name: "hsmcrypto", + Secret: "hsmcrypto", + MountPath: "/hsm", + Paths: []config.Path{ + { + Key: "cert.pem", + Path: "cert.pem", + }, + { + Key: "key.pem", + Path: "key.pem", + }, + }, + }, + config.MountPath{ + Name: "hsmconfig", + Secret: "hsmcrypto", + MountPath: "/etc/Chrystoki.conf", + SubPath: "Chrystoki.conf", + }, + }, + Envs: []corev1.EnvVar{ + { + Name: "env1", + Value: "env1value", + }, + }, + } + }) + + Context("volume mounts", func() { + It("builds volume mounts from config", func() { + vms := hsmConfig.GetVolumeMounts() + Expect(vms).To(ContainElements( + corev1.VolumeMount{ + Name: "hsmcrypto", + MountPath: "/hsm", + }, + corev1.VolumeMount{ + Name: "hsmconfig", + MountPath: "/etc/Chrystoki.conf", + SubPath: "Chrystoki.conf", + }, + )) + }) + }) + + Context("volumes", func() { + It("builds volumes from config", func() { + v := hsmConfig.GetVolumes() + Expect(v).To(ContainElements( + corev1.Volume{ + Name: "hsmcrypto", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "hsmcrypto", + Items: []corev1.KeyToPath{ + { + Key: "cert.pem", + Path: "cert.pem", + }, + { + Key: "key.pem", + Path: "key.pem", + }, + }, + }, + }, + }, + corev1.Volume{ + Name: "hsmconfig", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "hsmcrypto", + }, + }, + }, + )) + }) + }) + + Context("env vars", func() { + It("builds env vars from config", func() { + envs := hsmConfig.GetEnvs() + Expect(envs).To(ContainElements( + corev1.EnvVar{ + Name: "env1", + Value: "env1value", + }, + )) + }) + }) + + Context("build pull secret", func() { + It("returns empty LocalObjectReference obj if pull secret not passed in config", func() { + ps := hsmConfig.BuildPullSecret() + Expect(ps).To(Equal(corev1.LocalObjectReference{})) + }) + + It("returns LocalObjectReference with pull secret from config", func() { + hsmConfig.Library.Auth = &config.Auth{ + 
ImagePullSecret: "pullsecret", + } + ps := hsmConfig.BuildPullSecret() + Expect(ps.Name).To(Equal("pullsecret")) + }) + }) +}) diff --git a/pkg/initializer/common/config/hsmdaemon.go b/pkg/initializer/common/config/hsmdaemon.go new file mode 100644 index 00000000..7809266a --- /dev/null +++ b/pkg/initializer/common/config/hsmdaemon.go @@ -0,0 +1,118 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package config + +import ( + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/container" + corev1 "k8s.io/api/core/v1" +) + +const DAEMON_CHECK_CMD = "while true; do if [ -f /shared/daemon-launched ]; then break; fi; done" + +// Resource defines the contract required for adding a daemon init containter on to a kubernetes resource +type Resource interface { + AddContainer(add container.Container) + AppendVolumeIfMissing(volume corev1.Volume) + AppendPullSecret(imagePullSecret corev1.LocalObjectReference) +} + +// AddDaemonContainer appends an init container responsible for launching HSM daemon +// as a background process within the processNamespace of the pod +func AddDaemonContainer(config *HSMConfig, resource Resource, contResource corev1.ResourceRequirements, pvcMount *corev1.VolumeMount) { + t := true + f := false + + // The daemon needs to be started by root user, otherwise, results in this error: + // This daemon needs root privileges, but the effective user id is not 'root'. + user := int64(0) + + cont := corev1.Container{ + Name: "hsm-daemon", + Image: config.Daemon.Image, + ImagePullPolicy: corev1.PullAlways, + SecurityContext: &corev1.SecurityContext{ + RunAsUser: &user, + RunAsNonRoot: &f, + Privileged: &t, + AllowPrivilegeEscalation: &t, + }, + Resources: contResource, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "shared", + MountPath: "/shared", + }, + }, + Env: config.Daemon.Envs, + } + + volumeMounts := config.GetVolumeMounts() + if pvcMount != nil { + volumeMounts = append(volumeMounts, *pvcMount) + } + + cont.VolumeMounts = append(cont.VolumeMounts, volumeMounts...) 
+	if config.Daemon.Auth != nil {
+		resource.AppendPullSecret(config.BuildPullSecret())
+	}
+	// If a securityContext is passed in the HSM config, override the defaults set above
+	if config.Daemon.SecurityContext != nil {
+		if config.Daemon.SecurityContext.Privileged != nil {
+			cont.SecurityContext.Privileged = config.Daemon.SecurityContext.Privileged
+		}
+		if config.Daemon.SecurityContext.RunAsNonRoot != nil {
+			cont.SecurityContext.RunAsNonRoot = config.Daemon.SecurityContext.RunAsNonRoot
+		}
+		if config.Daemon.SecurityContext.RunAsUser != nil {
+			cont.SecurityContext.RunAsUser = config.Daemon.SecurityContext.RunAsUser
+		}
+		if config.Daemon.SecurityContext.AllowPrivilegeEscalation != nil {
+			cont.SecurityContext.AllowPrivilegeEscalation = config.Daemon.SecurityContext.AllowPrivilegeEscalation
+		}
+	}
+
+	// If resources are passed in the HSM config, override the defaults
+	if config.Daemon.Resources != nil {
+		cont.Resources = *config.Daemon.Resources
+	}
+
+	resource.AddContainer(container.Container{Container: &cont})
+}
+
+// Daemon represents the configuration for the HSM daemon
+type Daemon struct {
+	Image           string                       `json:"image"`
+	Envs            []corev1.EnvVar              `json:"envs,omitempty"`
+	Auth            *Auth                        `json:"auth,omitempty"`
+	SecurityContext *container.SecurityContext   `json:"securityContext,omitempty"`
+	Resources       *corev1.ResourceRequirements `json:"daemon,omitempty"`
+}
+
+// GetEnvs returns environment variables
+func (d *Daemon) GetEnvs() []corev1.EnvVar {
+	return d.Envs
+}
+
+// BuildPullSecret builds the string secret into the type expected by kubernetes
+func (d *Daemon) BuildPullSecret() corev1.LocalObjectReference {
+	if d.Auth != nil {
+		return d.Auth.BuildPullSecret()
+	}
+	return corev1.LocalObjectReference{}
+}
diff --git a/pkg/initializer/common/config/mocks/crypto.go b/pkg/initializer/common/config/mocks/crypto.go
new file mode 100644
index 00000000..f0e99df7
--- /dev/null
+++ b/pkg/initializer/common/config/mocks/crypto.go
@@ -0,0 +1,237 @@
+// Code generated by counterfeiter. DO NOT EDIT.
+package mocks + +import ( + "sync" + + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" +) + +type Crypto struct { + GetCryptoStub func() (*config.Response, error) + getCryptoMutex sync.RWMutex + getCryptoArgsForCall []struct { + } + getCryptoReturns struct { + result1 *config.Response + result2 error + } + getCryptoReturnsOnCall map[int]struct { + result1 *config.Response + result2 error + } + PingCAStub func() error + pingCAMutex sync.RWMutex + pingCAArgsForCall []struct { + } + pingCAReturns struct { + result1 error + } + pingCAReturnsOnCall map[int]struct { + result1 error + } + ValidateStub func() error + validateMutex sync.RWMutex + validateArgsForCall []struct { + } + validateReturns struct { + result1 error + } + validateReturnsOnCall map[int]struct { + result1 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *Crypto) GetCrypto() (*config.Response, error) { + fake.getCryptoMutex.Lock() + ret, specificReturn := fake.getCryptoReturnsOnCall[len(fake.getCryptoArgsForCall)] + fake.getCryptoArgsForCall = append(fake.getCryptoArgsForCall, struct { + }{}) + stub := fake.GetCryptoStub + fakeReturns := fake.getCryptoReturns + fake.recordInvocation("GetCrypto", []interface{}{}) + fake.getCryptoMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *Crypto) GetCryptoCallCount() int { + fake.getCryptoMutex.RLock() + defer fake.getCryptoMutex.RUnlock() + return len(fake.getCryptoArgsForCall) +} + +func (fake *Crypto) GetCryptoCalls(stub func() (*config.Response, error)) { + fake.getCryptoMutex.Lock() + defer fake.getCryptoMutex.Unlock() + fake.GetCryptoStub = stub +} + +func (fake *Crypto) GetCryptoReturns(result1 *config.Response, result2 error) { + fake.getCryptoMutex.Lock() + defer fake.getCryptoMutex.Unlock() + fake.GetCryptoStub = nil + fake.getCryptoReturns = struct { + result1 *config.Response + result2 error + }{result1, result2} +} + +func (fake *Crypto) GetCryptoReturnsOnCall(i int, result1 *config.Response, result2 error) { + fake.getCryptoMutex.Lock() + defer fake.getCryptoMutex.Unlock() + fake.GetCryptoStub = nil + if fake.getCryptoReturnsOnCall == nil { + fake.getCryptoReturnsOnCall = make(map[int]struct { + result1 *config.Response + result2 error + }) + } + fake.getCryptoReturnsOnCall[i] = struct { + result1 *config.Response + result2 error + }{result1, result2} +} + +func (fake *Crypto) PingCA() error { + fake.pingCAMutex.Lock() + ret, specificReturn := fake.pingCAReturnsOnCall[len(fake.pingCAArgsForCall)] + fake.pingCAArgsForCall = append(fake.pingCAArgsForCall, struct { + }{}) + stub := fake.PingCAStub + fakeReturns := fake.pingCAReturns + fake.recordInvocation("PingCA", []interface{}{}) + fake.pingCAMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Crypto) PingCACallCount() int { + fake.pingCAMutex.RLock() + defer fake.pingCAMutex.RUnlock() + return len(fake.pingCAArgsForCall) +} + +func (fake *Crypto) PingCACalls(stub func() error) { + fake.pingCAMutex.Lock() + defer fake.pingCAMutex.Unlock() + fake.PingCAStub = stub +} + +func (fake *Crypto) PingCAReturns(result1 error) { + fake.pingCAMutex.Lock() + defer fake.pingCAMutex.Unlock() + fake.PingCAStub = nil + fake.pingCAReturns = struct { + result1 error + }{result1} +} + +func (fake *Crypto) PingCAReturnsOnCall(i int, result1 
error) { + fake.pingCAMutex.Lock() + defer fake.pingCAMutex.Unlock() + fake.PingCAStub = nil + if fake.pingCAReturnsOnCall == nil { + fake.pingCAReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.pingCAReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Crypto) Validate() error { + fake.validateMutex.Lock() + ret, specificReturn := fake.validateReturnsOnCall[len(fake.validateArgsForCall)] + fake.validateArgsForCall = append(fake.validateArgsForCall, struct { + }{}) + stub := fake.ValidateStub + fakeReturns := fake.validateReturns + fake.recordInvocation("Validate", []interface{}{}) + fake.validateMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Crypto) ValidateCallCount() int { + fake.validateMutex.RLock() + defer fake.validateMutex.RUnlock() + return len(fake.validateArgsForCall) +} + +func (fake *Crypto) ValidateCalls(stub func() error) { + fake.validateMutex.Lock() + defer fake.validateMutex.Unlock() + fake.ValidateStub = stub +} + +func (fake *Crypto) ValidateReturns(result1 error) { + fake.validateMutex.Lock() + defer fake.validateMutex.Unlock() + fake.ValidateStub = nil + fake.validateReturns = struct { + result1 error + }{result1} +} + +func (fake *Crypto) ValidateReturnsOnCall(i int, result1 error) { + fake.validateMutex.Lock() + defer fake.validateMutex.Unlock() + fake.ValidateStub = nil + if fake.validateReturnsOnCall == nil { + fake.validateReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.validateReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Crypto) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.getCryptoMutex.RLock() + defer fake.getCryptoMutex.RUnlock() + fake.pingCAMutex.RLock() + defer fake.pingCAMutex.RUnlock() + fake.validateMutex.RLock() + defer fake.validateMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *Crypto) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ config.Crypto = new(Crypto) diff --git a/pkg/initializer/common/config/nodeou.go b/pkg/initializer/common/config/nodeou.go new file mode 100644 index 00000000..7dfaa61c --- /dev/null +++ b/pkg/initializer/common/config/nodeou.go @@ -0,0 +1,57 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package config + +import "sigs.k8s.io/yaml" + +type NodeOUConfig struct { + NodeOUs NodeOUs +} + +type NodeOUs struct { + Enable bool + ClientOUIdentifier Identifier + PeerOUIdentifier Identifier + AdminOUIdentifier Identifier + OrdererOUIdentifier Identifier +} + +type Identifier struct { + Certificate string + OrganizationalUnitIdentifier string +} + +func NodeOUConfigFromBytes(nodeOU []byte) (*NodeOUConfig, error) { + nodeOUConfig := &NodeOUConfig{} + err := yaml.Unmarshal(nodeOU, nodeOUConfig) + if err != nil { + return nil, err + } + + return nodeOUConfig, nil +} + +func NodeOUConfigToBytes(config *NodeOUConfig) ([]byte, error) { + nodeOUBytes, err := yaml.Marshal(config) + if err != nil { + return nil, err + } + + return nodeOUBytes, nil +} diff --git a/pkg/initializer/common/enroller/client.go b/pkg/initializer/common/enroller/client.go new file mode 100644 index 00000000..5aa4bf11 --- /dev/null +++ b/pkg/initializer/common/enroller/client.go @@ -0,0 +1,31 @@ +//go:build !pkcs11 +// +build !pkcs11 + +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package enroller + +import ( + commonapi "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + "github.com/hyperledger/fabric-ca/lib" +) + +func GetClient(client *lib.Client, bccsp *commonapi.BCCSP) *lib.Client { + return client +} diff --git a/pkg/initializer/common/enroller/client_pkcs11.go b/pkg/initializer/common/enroller/client_pkcs11.go new file mode 100644 index 00000000..32e6c914 --- /dev/null +++ b/pkg/initializer/common/enroller/client_pkcs11.go @@ -0,0 +1,57 @@ +//go:build pkcs11 +// +build pkcs11 + +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package enroller + +import ( + commonapi "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + "github.com/hyperledger/fabric-ca/lib" + "github.com/hyperledger/fabric/bccsp/factory" + "github.com/hyperledger/fabric/bccsp/pkcs11" +) + +func GetClient(client *lib.Client, bccsp *commonapi.BCCSP) *lib.Client { + if bccsp != nil { + if bccsp.PKCS11 != nil { + client.Config.CSP = &factory.FactoryOpts{ + ProviderName: bccsp.ProviderName, + Pkcs11Opts: &pkcs11.PKCS11Opts{ + SecLevel: bccsp.PKCS11.SecLevel, + HashFamily: bccsp.PKCS11.HashFamily, + Ephemeral: bccsp.PKCS11.Ephemeral, + Library: bccsp.PKCS11.Library, + Label: bccsp.PKCS11.Label, + Pin: bccsp.PKCS11.Pin, + SoftVerify: bccsp.PKCS11.SoftVerify, + Immutable: bccsp.PKCS11.Immutable, + }, + } + + if bccsp.PKCS11.FileKeyStore != nil { + client.Config.CSP.Pkcs11Opts.FileKeystore = &pkcs11.FileKeystoreOpts{ + KeyStorePath: bccsp.PKCS11.FileKeyStore.KeyStorePath, + } + } + } + } + + return client +} diff --git a/pkg/initializer/common/enroller/enroller.go b/pkg/initializer/common/enroller/enroller.go new file mode 100644 index 00000000..1f6cbcf8 --- /dev/null +++ b/pkg/initializer/common/enroller/enroller.go @@ -0,0 +1,141 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package enroller + +import ( + "bytes" + "crypto/x509" + "encoding/pem" + "time" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + "github.com/hyperledger/fabric-ca/lib" + "github.com/pkg/errors" + + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var log = logf.Log.WithName("init_enroller") + +//go:generate counterfeiter -o mocks/cryptoenroller.go -fake-name CryptoEnroller . 
CryptoEnroller + +type CryptoEnroller interface { + GetEnrollmentRequest() *current.Enrollment + Enroll() (*config.Response, error) + PingCA(time.Duration) error +} + +type Enroller struct { + Enroller CryptoEnroller + Timeout time.Duration +} + +func New(enroller CryptoEnroller) *Enroller { + return &Enroller{ + Enroller: enroller, + Timeout: 30 * time.Second, + } +} + +func (e *Enroller) GetCrypto() (*config.Response, error) { + log.Info("Getting crypto...") + resp, err := e.Enroller.Enroll() + if err != nil { + return nil, errors.Wrap(err, "failed to enroll with CA") + } + + // Store crypto + for _, adminCert := range e.Enroller.GetEnrollmentRequest().AdminCerts { + bytes, err := util.Base64ToBytes(adminCert) + if err != nil { + return nil, errors.Wrap(err, "failed to parse admin cert") + } + resp.AdminCerts = append(resp.AdminCerts, bytes) + } + + return resp, nil +} + +func (e *Enroller) PingCA() error { + log.Info("Check if CA is reachable before triggering enroll job") + return e.Enroller.PingCA(e.Timeout) +} + +func (e *Enroller) Validate() error { + req := e.Enroller.GetEnrollmentRequest() + + if req.CAHost == "" { + return errors.New("unable to enroll, CA host not specified") + } + + if req.CAPort == "" { + return errors.New("unable to enroll, CA port not specified") + } + + if req.EnrollID == "" { + return errors.New("unable to enroll, enrollment ID not specified") + } + + if req.EnrollSecret == "" { + return errors.New("unable to enroll, enrollment secret not specified") + } + + if req.CATLS.CACert == "" { + return errors.New("unable to enroll, CA TLS certificate not specified") + } + + return nil +} + +func ParseEnrollmentResponse(resp *config.Response, si *lib.GetCAInfoResponse) (*config.Response, error) { + chain := si.CAChain + for len(chain) > 0 { + var block *pem.Block + block, chain = pem.Decode(chain) + if block == nil { + break + } + + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, errors.Wrap(err, "Failed to parse certificate in the CA chain") + } + + if !cert.IsCA { + return nil, errors.New("A certificate in the CA chain is not a CA certificate") + } + + // If authority key id is not present or if it is present and equal to subject key id, + // then it is a root certificate + if len(cert.AuthorityKeyId) == 0 || bytes.Equal(cert.AuthorityKeyId, cert.SubjectKeyId) { + resp.CACerts = append(resp.CACerts, pem.EncodeToMemory(block)) + } else { + resp.IntermediateCerts = append(resp.IntermediateCerts, pem.EncodeToMemory(block)) + } + } + + // for intermediate cert, put the whole chain as is + if len(resp.IntermediateCerts) > 0 { + resp.IntermediateCerts = [][]byte{si.CAChain} + } + + return resp, nil +} diff --git a/pkg/initializer/common/enroller/enroller_suite_test.go b/pkg/initializer/common/enroller/enroller_suite_test.go new file mode 100644 index 00000000..cf8f8b10 --- /dev/null +++ b/pkg/initializer/common/enroller/enroller_suite_test.go @@ -0,0 +1,33 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package enroller_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestEnroller(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Enroller Suite") +} + +//go:generate counterfeiter -o mocks/client.go -fake-name Client ../../../k8s/controllerclient Client diff --git a/pkg/initializer/common/enroller/enroller_test.go b/pkg/initializer/common/enroller/enroller_test.go new file mode 100644 index 00000000..c5c19dc7 --- /dev/null +++ b/pkg/initializer/common/enroller/enroller_test.go @@ -0,0 +1,138 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package enroller_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/pkg/errors" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/enroller" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/enroller/mocks" +) + +var _ = Describe("Enroller", func() { + var ( + mockCryptoEnroller *mocks.CryptoEnroller + testEnroller *enroller.Enroller + ) + + BeforeEach(func() { + mockCryptoEnroller = &mocks.CryptoEnroller{} + testEnroller = &enroller.Enroller{ + Enroller: mockCryptoEnroller, + } + }) + + Context("get crypto", func() { + BeforeEach(func() { + mockCryptoEnroller.GetEnrollmentRequestReturns(¤t.Enrollment{}) + mockCryptoEnroller.EnrollReturns(&config.Response{}, nil) + }) + + It("returns response", func() { + resp, err := testEnroller.GetCrypto() + Expect(err).NotTo(HaveOccurred()) + Expect(resp).NotTo(BeNil()) + }) + + It("returns error if enroll fails", func() { + mockCryptoEnroller.EnrollReturns(nil, errors.New("enroll failed")) + + resp, err := testEnroller.GetCrypto() + Expect(err).To(HaveOccurred()) + Expect(err).To(MatchError(ContainSubstring("enroll failed"))) + Expect(resp).To(BeNil()) + }) + }) + + Context("ping CA", func() { + It("returns true if ca reachable", func() { + err := testEnroller.PingCA() + Expect(err).To(BeNil()) + }) + + It("returns true if ca reachable", func() { + mockCryptoEnroller.PingCAReturns(errors.New("ping failed")) + + err := testEnroller.PingCA() + Expect(err).To(HaveOccurred()) + Expect(err).To(MatchError(ContainSubstring("ping failed"))) + }) + }) + + Context("validate", func() { + var req *current.Enrollment + + BeforeEach(func() { + req = ¤t.Enrollment{ + CAHost: "host", + CAPort: "1234", + EnrollID: "id", + EnrollSecret: "secret", + CATLS: ¤t.CATLS{ + CACert: "cacert", + }, + } + mockCryptoEnroller.GetEnrollmentRequestReturns(req) + }) + + It("successfull validation returns no error", func() { + err := testEnroller.Validate() + Expect(err).NotTo(HaveOccurred()) + }) + + It("returns error if missing 
CA host", func() { + req.CAHost = "" + + err := testEnroller.Validate() + Expect(err).To(MatchError("unable to enroll, CA host not specified")) + }) + + It("returns error if missing CA port", func() { + req.CAPort = "" + + err := testEnroller.Validate() + Expect(err).To(MatchError("unable to enroll, CA port not specified")) + }) + + It("returns error if missing enrollment ID", func() { + req.EnrollID = "" + + err := testEnroller.Validate() + Expect(err).To(MatchError("unable to enroll, enrollment ID not specified")) + }) + + It("returns error if missing enrollment secret", func() { + req.EnrollSecret = "" + + err := testEnroller.Validate() + Expect(err).To(MatchError("unable to enroll, enrollment secret not specified")) + }) + + It("returns error if missing CA TLS cert", func() { + req.CATLS.CACert = "" + + err := testEnroller.Validate() + Expect(err).To(MatchError("unable to enroll, CA TLS certificate not specified")) + }) + }) +}) diff --git a/pkg/initializer/common/enroller/fabcaclient.go b/pkg/initializer/common/enroller/fabcaclient.go new file mode 100644 index 00000000..d3dea88e --- /dev/null +++ b/pkg/initializer/common/enroller/fabcaclient.go @@ -0,0 +1,141 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package enroller + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "net" + "net/http" + "time" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + commonapi "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + "github.com/hyperledger/fabric-ca/lib" + catls "github.com/hyperledger/fabric-ca/lib/tls" + "github.com/pkg/errors" +) + +func NewFabCAClient(cfg *current.Enrollment, homeDir string, bccsp *commonapi.BCCSP, cert []byte) *FabCAClient { + client := &lib.Client{ + HomeDir: homeDir, + Config: &lib.ClientConfig{ + TLS: catls.ClientTLSConfig{ + Enabled: true, + CertFiles: []string{"tlsCert.pem"}, + }, + URL: fmt.Sprintf("https://%s:%s", cfg.CAHost, cfg.CAPort), + }, + } + + client = GetClient(client, bccsp) + return &FabCAClient{ + Client: client, + EnrollmentCfg: cfg, + CATLSCert: cert, + BCCSP: bccsp, + } +} + +type FabCAClient struct { + *lib.Client + + EnrollmentCfg *current.Enrollment + BCCSP *commonapi.BCCSP + CATLSCert []byte +} + +func (c *FabCAClient) GetHomeDir() string { + return c.HomeDir +} + +func (c *FabCAClient) SetURL(url string) { + c.Config.URL = url +} + +func (c *FabCAClient) GetConfig() *lib.ClientConfig { + return c.Config +} + +func (c *FabCAClient) GetTLSCert() []byte { + return c.CATLSCert +} + +func (c *FabCAClient) GetEnrollmentRequest() *current.Enrollment { + return c.EnrollmentCfg +} + +func (c *FabCAClient) SetHSMLibrary(library string) { + if c.BCCSP != nil { + c.BCCSP.PKCS11.Library = library + c.Client = GetClient(c.Client, c.BCCSP) + } +} + +func (c *FabCAClient) PingCA(timeout time.Duration) error { + url := fmt.Sprintf("%s/cainfo", c.Client.Config.URL) + log.Info(fmt.Sprintf("Pinging CA at '%s' with timeout value of %s", url, timeout.String())) + + rootCertPool := x509.NewCertPool() + rootCertPool.AppendCertsFromPEM(c.CATLSCert) + client := http.Client{ + Transport: &http.Transport{ + IdleConnTimeout: timeout, + Dial: (&net.Dialer{ + Timeout: timeout, + KeepAlive: timeout, + }).Dial, + TLSHandshakeTimeout: timeout / 2, + TLSClientConfig: &tls.Config{ + RootCAs: rootCertPool, + MinVersion: tls.VersionTLS12, // TLS 1.2 recommended, TLS 1.3 (current latest version) encouraged + }, + }, + Timeout: timeout, + } + + if err := c.healthCheck(client, url); err != nil { + return errors.Wrapf(err, "pinging '%s' failed", url) + } + + return nil +} + +func (c *FabCAClient) healthCheck(client http.Client, healthURL string) error { + ctx, cancel := context.WithTimeout(context.Background(), client.Timeout) + defer cancel() + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, healthURL, nil) + if err != nil { + return errors.Wrap(err, "invalid http request") + } + + resp, err := client.Do(req) + if err != nil { + return errors.Wrapf(err, "health check request failed") + } + + if resp.StatusCode != http.StatusOK { + return errors.Wrapf(err, "failed health check, ca is not running") + } + + return nil +} diff --git a/pkg/initializer/common/enroller/fabcaclient_test.go b/pkg/initializer/common/enroller/fabcaclient_test.go new file mode 100644 index 00000000..c6672568 --- /dev/null +++ b/pkg/initializer/common/enroller/fabcaclient_test.go @@ -0,0 +1,72 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package enroller_test + +import ( + "net/http" + "net/http/httptest" + "time" + + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/enroller" + "github.com/hyperledger/fabric-ca/lib" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("Fabric CA client", func() { + var ( + server *httptest.Server + fabCaClient *enroller.FabCAClient + ) + + BeforeSuite(func() { + server = httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + // Test request parameters + Expect(req.URL.String()).To(Equal("/cainfo")) + // Send response to be tested + rw.Write([]byte(`OK`)) + })) + + fabCaClient = &enroller.FabCAClient{ + Client: &lib.Client{ + Config: &lib.ClientConfig{ + URL: server.URL, + }, + }, + } + }) + + AfterSuite(func() { + server.Close() + }) + + Context("ping CA", func() { + It("pings /cainfo endpoint", func() { + err := fabCaClient.PingCA(30 * time.Second) + Expect(err).NotTo(HaveOccurred()) + }) + + It("returns error if pinging /cainfo endpoint fails", func() { + server.Close() + + err := fabCaClient.PingCA(30 * time.Second) + Expect(err).To(HaveOccurred()) + }) + }) +}) diff --git a/pkg/initializer/common/enroller/factory.go b/pkg/initializer/common/enroller/factory.go new file mode 100644 index 00000000..93438dc2 --- /dev/null +++ b/pkg/initializer/common/enroller/factory.go @@ -0,0 +1,76 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package enroller + +import ( + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/cryptogen" + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/runtime" + + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" +) + +//go:generate counterfeiter -o mocks/cryptoinstance.go -fake-name CryptoInstance . 
CryptoInstance + +type CryptoInstance interface { + runtime.Object + Instance + IsHSMEnabled() bool + UsingHSMProxy() bool + GetConfigOverride() (interface{}, error) +} + +func Factory(enrollment *current.Enrollment, k8sClient k8sclient.Client, instance CryptoInstance, storagePath string, scheme *runtime.Scheme, bytes []byte, timeouts HSMEnrollJobTimeouts) (*Enroller, error) { + caClient := NewFabCAClient(enrollment, storagePath, nil, bytes) + certEnroller := New(NewSWEnroller(caClient)) + + if instance.IsHSMEnabled() { + switch instance.UsingHSMProxy() { + case true: + log.Info("Using HSM Proxy enroller") + bccsp := cryptogen.InitBCCSP(instance) + caClient = NewFabCAClient(enrollment, storagePath, bccsp, bytes) + certEnroller = New(NewHSMProxyEnroller(caClient)) + case false: + hsmConfig, err := config.ReadHSMConfig(k8sClient, instance) + if err != nil { + return nil, errors.Wrap(err, "failed to read HSM config") + } + + bccsp := cryptogen.InitBCCSP(instance) + caClient = NewFabCAClient(enrollment, storagePath, bccsp, bytes) + + if hsmConfig.Daemon != nil { + log.Info("Using HSM Daemon enroller") + hsmDaemonEnroller := NewHSMDaemonEnroller(enrollment, instance, caClient, k8sClient, scheme, timeouts, hsmConfig) + certEnroller = New(hsmDaemonEnroller) + } else { + log.Info("Using HSM enroller") + hsmEnroller := NewHSMEnroller(enrollment, instance, caClient, k8sClient, scheme, timeouts, hsmConfig) + certEnroller = New(hsmEnroller) + } + } + } else { + log.Info("Using SW enroller") + } + + return certEnroller, nil +} diff --git a/pkg/initializer/common/enroller/factory_test.go b/pkg/initializer/common/enroller/factory_test.go new file mode 100644 index 00000000..8a249cd9 --- /dev/null +++ b/pkg/initializer/common/enroller/factory_test.go @@ -0,0 +1,78 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package enroller_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/enroller" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/enroller/mocks" + + "k8s.io/apimachinery/pkg/runtime" +) + +var _ = Describe("Enroller factory", func() { + var instance *mocks.CryptoInstance + + BeforeEach(func() { + instance = &mocks.CryptoInstance{} + }) + + Context("software enroller", func() { + It("returns software type enroller", func() { + e, err := enroller.Factory(¤t.Enrollment{}, &mocks.Client{}, instance, "/tmp", &runtime.Scheme{}, []byte("cert"), enroller.HSMEnrollJobTimeouts{}) + Expect(err).NotTo(HaveOccurred()) + + _, sw := e.Enroller.(*enroller.SWEnroller) + Expect(sw).To(Equal(true)) + }) + }) + + Context("HSM", func() { + BeforeEach(func() { + instance.IsHSMEnabledReturns(true) + }) + + Context("sidecar enroller", func() { + It("returns sidecar type enroller", func() { + e, err := enroller.Factory(¤t.Enrollment{}, &mocks.Client{}, instance, "/tmp", &runtime.Scheme{}, []byte("cert"), enroller.HSMEnrollJobTimeouts{}) + Expect(err).NotTo(HaveOccurred()) + + _, hsm := e.Enroller.(*enroller.HSMEnroller) + Expect(hsm).To(Equal(true)) + }) + }) + + Context("proxy enroller", func() { + BeforeEach(func() { + instance.UsingHSMProxyReturns(true) + }) + + It("returns sidecar type enroller", func() { + e, err := enroller.Factory(¤t.Enrollment{}, &mocks.Client{}, instance, "/tmp", &runtime.Scheme{}, []byte("cert"), enroller.HSMEnrollJobTimeouts{}) + Expect(err).NotTo(HaveOccurred()) + + _, hsm := e.Enroller.(*enroller.HSMProxyEnroller) + Expect(hsm).To(Equal(true)) + }) + }) + }) +}) diff --git a/pkg/initializer/common/enroller/hsmdaemonenroller.go b/pkg/initializer/common/enroller/hsmdaemonenroller.go new file mode 100644 index 00000000..8085d483 --- /dev/null +++ b/pkg/initializer/common/enroller/hsmdaemonenroller.go @@ -0,0 +1,346 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package enroller + +import ( + "context" + "fmt" + "path/filepath" + "time" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + jobv1 "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/job" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + "github.com/pkg/errors" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" +) + +// HSMDaemonEnroller is responsible for enrolling with CAs to generate cryptographic materical +// for fabric nodes +type HSMDaemonEnroller struct { + CAClient HSMCAClient + Client k8sclient.Client + Instance Instance + Timeouts HSMEnrollJobTimeouts + Scheme *runtime.Scheme + Config *config.HSMConfig +} + +// NewHSMDaemonEnroller initializes and returns a pointer to HSMDaemonEnroller +func NewHSMDaemonEnroller(cfg *current.Enrollment, instance Instance, caclient HSMCAClient, client k8sclient.Client, scheme *runtime.Scheme, timeouts HSMEnrollJobTimeouts, hsmConfig *config.HSMConfig) *HSMDaemonEnroller { + return &HSMDaemonEnroller{ + CAClient: caclient, + Client: client, + Instance: instance, + Scheme: scheme, + Timeouts: timeouts, + Config: hsmConfig, + } +} + +// GetEnrollmentRequest returns the enrollment request defined on the ca client +func (e *HSMDaemonEnroller) GetEnrollmentRequest() *current.Enrollment { + return e.CAClient.GetEnrollmentRequest() +} + +// ReadKey is no-op method on HSM +func (e *HSMDaemonEnroller) ReadKey() ([]byte, error) { + return nil, nil +} + +// PingCA uses the ca client do ping the CA +func (e *HSMDaemonEnroller) PingCA(timeout time.Duration) error { + return e.CAClient.PingCA(timeout) +} + +// Enroll reaches out the CA to get back a signed certificate +func (e *HSMDaemonEnroller) Enroll() (*config.Response, error) { + log.Info(fmt.Sprintf("Enrolling using HSM Daemon")) + // Deleting CA client config is an unfortunate requirement since the ca client + // config map was not properly deleted after a successfull reenrollment request. + // This is problematic when recreating a resource with same name, as it will + // try to use old settings in the config map, which might no longer apply, thus + // it must be removed if found before proceeding. 
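+	// In outline, the remainder of Enroll: removes any stale client config, points
+	// the CA client at the HSM library copied under /hsm/lib by the job's init
+	// container, creates the root TLS secret and client config map consumed by the
+	// job, runs the enroll job to completion, and finally waits for the
+	// "ecert-<instance>-signcert" secret produced by the job before setting
+	// controller references.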
+ if err := deleteCAClientConfig(e.Client, e.Instance); err != nil { + return nil, err + } + + e.CAClient.SetHSMLibrary(filepath.Join("/hsm/lib", filepath.Base(e.Config.Library.FilePath))) + if err := createRootTLSSecret(e.Client, e.CAClient, e.Scheme, e.Instance); err != nil { + return nil, err + } + + if err := createCAClientConfig(e.Client, e.CAClient, e.Scheme, e.Instance); err != nil { + return nil, err + } + + job := e.initHSMJob(e.Instance, e.Timeouts) + if err := e.Client.Create(context.TODO(), job.Job, k8sclient.CreateOption{ + Owner: e.Instance, + Scheme: e.Scheme, + }); err != nil { + return nil, errors.Wrap(err, "failed to create HSM ca initialization job") + } + log.Info(fmt.Sprintf("Job '%s' created", job.GetName())) + + if err := job.WaitUntilActive(e.Client); err != nil { + return nil, err + } + log.Info(fmt.Sprintf("Job '%s' active", job.GetName())) + + if err := job.WaitUntilContainerFinished(e.Client, CertGen); err != nil { + return nil, err + } + log.Info(fmt.Sprintf("Job '%s' finished", job.GetName())) + + status, err := job.ContainerStatus(e.Client, CertGen) + if err != nil { + return nil, err + } + + log.Info(fmt.Sprintf("Job status at finish '%s'", status)) + + switch status { + case jobv1.FAILED: + return nil, fmt.Errorf("Job '%s' finished unsuccessfully, not cleaning up pods to allow for error evaluation", job.GetName()) + case jobv1.COMPLETED: + if err := job.Delete(e.Client); err != nil { + return nil, err + } + + if err := deleteRootTLSSecret(e.Client, e.Instance); err != nil { + return nil, err + } + + if err := deleteCAClientConfig(e.Client, e.Instance); err != nil { + return nil, err + } + } + + name := fmt.Sprintf("ecert-%s-signcert", e.Instance.GetName()) + err = wait.Poll(2*time.Second, 30*time.Second, func() (bool, error) { + sec := &corev1.Secret{} + log.Info(fmt.Sprintf("Waiting for secret '%s' to be created", name)) + err = e.Client.Get(context.TODO(), types.NamespacedName{ + Name: name, + Namespace: e.Instance.GetNamespace(), + }, sec) + if err != nil { + return false, nil + } + + return true, nil + }) + if err != nil { + return nil, fmt.Errorf("failed to create secret '%s'", name) + } + + if err := setControllerReferences(e.Client, e.Scheme, e.Instance); err != nil { + return nil, err + } + + return &config.Response{}, nil +} + +const ( + // HSMClient is the name of container that contain the HSM client library + HSMClient = "hsm-client" + // CertGen is the name of container that runs the command to generate the certificate for the CA + CertGen = "certgen" +) + +func (e *HSMDaemonEnroller) initHSMJob(instance Instance, timeouts HSMEnrollJobTimeouts) *jobv1.Job { + hsmConfig := e.Config + req := e.CAClient.GetEnrollmentRequest() + + hsmLibraryPath := hsmConfig.Library.FilePath + hsmLibraryName := filepath.Base(hsmLibraryPath) + + jobName := fmt.Sprintf("%s-enroll", instance.GetName()) + + f := false + t := true + user := int64(0) + backoffLimit := int32(0) + mountPath := "/shared" + pvcVolumeName := fmt.Sprintf("%s-pvc-volume", instance.GetName()) + + k8sJob := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: jobName, + Namespace: instance.GetNamespace(), + Labels: map[string]string{ + "name": jobName, + "owner": instance.GetName(), + }, + }, + Spec: batchv1.JobSpec{ + BackoffLimit: &backoffLimit, + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + ServiceAccountName: instance.GetName(), + ImagePullSecrets: util.AppendImagePullSecretIfMissing(instance.GetPullSecrets(), hsmConfig.BuildPullSecret()), + RestartPolicy: 
corev1.RestartPolicyNever, + InitContainers: []corev1.Container{ + { + Name: HSMClient, + Image: hsmConfig.Library.Image, + ImagePullPolicy: corev1.PullAlways, + Command: []string{ + "sh", + "-c", + fmt.Sprintf("mkdir -p %s/hsm && dst=\"%s/hsm/%s\" && echo \"Copying %s to ${dst}\" && mkdir -p $(dirname $dst) && cp -r %s $dst", mountPath, mountPath, hsmLibraryName, hsmLibraryPath, hsmLibraryPath), + }, + SecurityContext: &corev1.SecurityContext{ + RunAsUser: &user, + RunAsNonRoot: &f, + AllowPrivilegeEscalation: &t, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "shared", + MountPath: mountPath, + }, + }, + Resources: instance.GetResource(current.INIT), + }, + }, + Containers: []corev1.Container{ + { + Name: CertGen, + Image: instance.EnrollerImage(), + ImagePullPolicy: corev1.PullAlways, + SecurityContext: &corev1.SecurityContext{ + RunAsUser: &user, + Privileged: &t, + }, + Env: hsmConfig.GetEnvs(), + Command: []string{ + "sh", + "-c", + }, + Args: []string{ + fmt.Sprintf(config.DAEMON_CHECK_CMD+" && /usr/local/bin/enroller node enroll %s %s %s %s %s %s %s %s %s", e.CAClient.GetHomeDir(), "/tmp/fabric-ca-client-config.yaml", req.CAHost, req.CAPort, req.CAName, instance.GetName(), instance.GetNamespace(), req.EnrollID, req.EnrollSecret), + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "tlscertfile", + MountPath: fmt.Sprintf("%s/tlsCert.pem", e.CAClient.GetHomeDir()), + SubPath: "tlsCert.pem", + }, + { + Name: "clientconfig", + MountPath: fmt.Sprintf("/tmp/%s", "fabric-ca-client-config.yaml"), + SubPath: "fabric-ca-client-config.yaml", + }, + { + Name: "shared", + MountPath: "/hsm/lib", + SubPath: "hsm", + }, + { + Name: "shared", + MountPath: "/shared", + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "shared", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{ + Medium: corev1.StorageMediumMemory, + }, + }, + }, + { + Name: "tlscertfile", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: fmt.Sprintf("%s-init-roottls", instance.GetName()), + }, + }, + }, + { + Name: "clientconfig", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: fmt.Sprintf("%s-init-config", instance.GetName()), + }, + }, + }, + }, + { + Name: pvcVolumeName, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: instance.PVCName(), + }, + }, + }, + }, + }, + }, + }, + } + + job := jobv1.New(k8sJob, &jobv1.Timeouts{ + WaitUntilActive: timeouts.JobStart.Get(), + WaitUntilFinished: timeouts.JobCompletion.Get(), + }) + + job.Spec.Template.Spec.Volumes = append(job.Spec.Template.Spec.Volumes, hsmConfig.GetVolumes()...) + job.Spec.Template.Spec.Containers[0].VolumeMounts = append(job.Spec.Template.Spec.Containers[0].VolumeMounts, hsmConfig.GetVolumeMounts()...) + + // If daemon settings are configured in HSM config, create a sidecar that is running the daemon image + if e.Config.Daemon != nil { + // Certain token information requires to be stored in persistent store, the administrator + // responsible for configuring HSM sets the HSM config to point to the path where the PVC + // needs to be mounted. 
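+		// Scan the configured mount paths for the entry backed by the PVC; the last
+		// matching entry wins and is handed to the daemon container (and, when set,
+		// is also mounted into the CertGen container further below).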
+		var pvcMount *corev1.VolumeMount
+		for _, vm := range e.Config.MountPaths {
+			if vm.UsePVC {
+				pvcMount = &corev1.VolumeMount{
+					Name:      pvcVolumeName,
+					MountPath: vm.MountPath,
+				}
+			}
+		}
+
+		// Add daemon container to the deployment
+		config.AddDaemonContainer(e.Config, job, instance.GetResource(current.HSMDAEMON), pvcMount)
+
+		// If a pvc mount has been configured in HSM config, set the volume mount on the CertGen container
+		if pvcMount != nil {
+			job.Spec.Template.Spec.Containers[0].VolumeMounts = append(job.Spec.Template.Spec.Containers[0].VolumeMounts, *pvcMount)
+		}
+	}
+
+	return job
+}
diff --git a/pkg/initializer/common/enroller/hsmdaemonenroller_test.go b/pkg/initializer/common/enroller/hsmdaemonenroller_test.go
new file mode 100644
index 00000000..50c19687
--- /dev/null
+++ b/pkg/initializer/common/enroller/hsmdaemonenroller_test.go
@@ -0,0 +1,385 @@
+/*
+ * Copyright contributors to the Hyperledger Fabric Operator project
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package enroller_test
+
+import (
+	"context"
+	"fmt"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	"github.com/pkg/errors"
+
+	batchv1 "k8s.io/api/batch/v1"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/types"
+
+	current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1"
+	ccmocks "github.com/IBM-Blockchain/fabric-operator/controllers/mocks"
+	"github.com/IBM-Blockchain/fabric-operator/pkg/apis/common"
+	"github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config"
+	"github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/enroller"
+	"github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/enroller/mocks"
+
+	k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+var _ = Describe("HSM Daemon sidecar enroller", func() {
+	var (
+		e           *enroller.HSMDaemonEnroller
+		ccClient    *ccmocks.Client
+		hsmcaClient *mocks.HSMCAClient
+		instance    *mocks.Instance
+	)
+
+	BeforeEach(func() {
+		instance = &mocks.Instance{}
+		instance.GetNameReturns("test")
+		instance.PVCNameReturns("test-pvc")
+
+		ccClient = &ccmocks.Client{
+			GetStub: func(ctx context.Context, nn types.NamespacedName, obj k8sclient.Object) error {
+				switch obj.(type) {
+				case *batchv1.Job:
+					j := obj.(*batchv1.Job)
+					j.Status.Active = int32(1)
+					j.Name = "test-job"
+				}
+				return nil
+			},
+			ListStub: func(ctx context.Context, obj k8sclient.ObjectList, opts ...k8sclient.ListOption) error {
+				switch obj.(type) {
+				case *corev1.PodList:
+					p := obj.(*corev1.PodList)
+					p.Items = []corev1.Pod{{
+						Status: corev1.PodStatus{
+							ContainerStatuses: []corev1.ContainerStatus{
+								{
+									Name: enroller.CertGen,
+									State: corev1.ContainerState{
+										Terminated: &corev1.ContainerStateTerminated{
+											ExitCode: int32(0),
+										},
+									},
+								},
+							},
+							Phase: corev1.PodSucceeded,
+						},
+					}}
+				}
+				return nil
+			},
+		}
+
+		hsmcaClient = &mocks.HSMCAClient{}
+		hsmcaClient.GetEnrollmentRequestReturns(&current.Enrollment{
+			CATLS: &current.CATLS{
+				CACert:
"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNGakNDQWIyZ0F3SUJBZ0lVZi84bk94M2NqM1htVzNDSUo1L0Q1ejRRcUVvd0NnWUlLb1pJemowRUF3SXcKYURFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJrd0Z3WURWUVFERXhCbVlXSnlhV010ClkyRXRjMlZ5ZG1WeU1CNFhEVEU1TVRBek1ERTNNamd3TUZvWERUTTBNVEF5TmpFM01qZ3dNRm93YURFTE1Ba0cKQTFVRUJoTUNWVk14RnpBVkJnTlZCQWdURGs1dmNuUm9JRU5oY205c2FXNWhNUlF3RWdZRFZRUUtFd3RJZVhCbApjbXhsWkdkbGNqRVBNQTBHQTFVRUN4TUdSbUZpY21sak1Sa3dGd1lEVlFRREV4Qm1ZV0p5YVdNdFkyRXRjMlZ5CmRtVnlNRmt3RXdZSEtvWkl6ajBDQVFZSUtvWkl6ajBEQVFjRFFnQUVSbzNmbUc2UHkyUHd6cUMwNnFWZDlFOFgKZ044eldqZzFMb3lnMmsxdkQ4MXY1dENRRytCTVozSUJGQnI2VTRhc0tZTUREakd6TElERmdUUTRjVDd1VktORgpNRU13RGdZRFZSMFBBUUgvQkFRREFnRUdNQklHQTFVZEV3RUIvd1FJTUFZQkFmOENBUUV3SFFZRFZSME9CQllFCkZFa0RtUHhjbTdGcXZSMXllN0tNNGdLLy9KZ1JNQW9HQ0NxR1NNNDlCQU1DQTBjQU1FUUNJRC92QVFVSEh2SWwKQWZZLzM5UWdEU2ltTWpMZnhPTG44NllyR1EvWHpkQVpBaUFpUmlyZmlMdzVGbXBpRDhtYmlmRjV4bzdFUzdqNApaUWQyT0FUNCt5OWE0Zz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K", + }, + }) + + hsmConfig := &config.HSMConfig{ + Type: "hsm", + Version: "v1", + Library: config.Library{ + FilePath: "/usr/lib/libCryptoki2_64.so", + Image: "ghcr.io/ibm-blockchain/ibp-pkcs11-proxy/gemalto-client:skarim-amd64", + Auth: &config.Auth{ + ImagePullSecret: "hsmpullsecret", + }, + }, + Envs: []corev1.EnvVar{ + { + Name: "DUMMY_ENV_NAME", + Value: "DUMMY_ENV_VALUE", + }, + }, + Daemon: &config.Daemon{ + Image: "ghcr.io/ibm-blockchain/ibp-pkcs11-proxy/hsmdaemon:skarim-amd64", + Auth: &config.Auth{ + ImagePullSecret: "hsmpullsecret", + }, + Envs: []corev1.EnvVar{ + { + Name: "DAEMON_ENV_NAME", + Value: "DAEMON_ENV_VALUE", + }, + }, + }, + MountPaths: []config.MountPath{ + { + MountPath: "/pvc/mount/path", + UsePVC: true, + }, + { + Name: "hsmcrypto", + Secret: "hsmcrypto", + MountPath: "/hsm", + Paths: []config.Path{ + { + Key: "cafile.pem", + Path: "cafile.pem", + }, + }, + }, + { + Name: "hsmconfig", + Secret: "hsmcrypto", + MountPath: "/etc/Chrystoki.conf", + SubPath: "Chrystoki.conf", + }, + }, + } + + e = &enroller.HSMDaemonEnroller{ + Config: hsmConfig, + Client: ccClient, + Instance: instance, + CAClient: hsmcaClient, + Timeouts: enroller.HSMEnrollJobTimeouts{ + JobStart: common.MustParseDuration("1s"), + JobCompletion: common.MustParseDuration("1s"), + }, + } + }) + + Context("enroll", func() { + It("returns error if creating ca crypto secret fails", func() { + ccClient.CreateReturnsOnCall(0, errors.New("failed to create root TLS secret")) + _, err := e.Enroll() + Expect(err).To(MatchError(ContainSubstring("failed to create root TLS secret"))) + }) + + It("returns error if creating ca config map fails", func() { + ccClient.CreateReturnsOnCall(1, errors.New("failed to create ca config map")) + _, err := e.Enroll() + Expect(err).To(MatchError(ContainSubstring("failed to create ca config map"))) + }) + + It("returns error if creating job fails", func() { + ccClient.CreateReturnsOnCall(2, errors.New("failed to create job")) + _, err := e.Enroll() + Expect(err).To(MatchError(ContainSubstring("failed to create job"))) + }) + + Context("job start timeout", func() { + BeforeEach(func() { + ccClient.GetStub = func(ctx context.Context, nn types.NamespacedName, obj k8sclient.Object) error { + switch obj.(type) { + case *batchv1.Job: + j := obj.(*batchv1.Job) + j.Status.Active = int32(0) + j.Name = "test-job" + + } + return nil + } + }) + + It("returns error if job doesn't start before timeout", func() { + _, err := e.Enroll() + 
Expect(err).To(MatchError(ContainSubstring("job failed to start"))) + }) + }) + + Context("job fails", func() { + When("job timesout", func() { + BeforeEach(func() { + ccClient.ListStub = func(ctx context.Context, obj k8sclient.ObjectList, opts ...k8sclient.ListOption) error { + switch obj.(type) { + case *corev1.PodList: + p := obj.(*corev1.PodList) + p.Items = []corev1.Pod{ + { + Status: corev1.PodStatus{ + ContainerStatuses: []corev1.ContainerStatus{ + { + Name: enroller.CertGen, + State: corev1.ContainerState{}, + }, + }, + }, + }, + } + } + return nil + } + }) + + It("returns error", func() { + _, err := e.Enroll() + Expect(err).To(MatchError(ContainSubstring("failed to finish"))) + }) + }) + + When("pod enters failed state", func() { + BeforeEach(func() { + ccClient.ListStub = func(ctx context.Context, obj k8sclient.ObjectList, opts ...k8sclient.ListOption) error { + switch obj.(type) { + case *corev1.PodList: + p := obj.(*corev1.PodList) + p.Items = []corev1.Pod{ + { + Status: corev1.PodStatus{ + ContainerStatuses: []corev1.ContainerStatus{ + { + Name: enroller.CertGen, + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: int32(1), + }, + }, + }, + }, + }, + }, + } + } + return nil + } + }) + + It("returns error", func() { + _, err := e.Enroll() + Expect(err).To(MatchError(ContainSubstring("finished unsuccessfully, not cleaning up pods to allow for error"))) + }) + }) + }) + + It("returns no error on successfull enroll", func() { + resp, err := e.Enroll() + Expect(err).NotTo(HaveOccurred()) + Expect(resp).NotTo(BeNil()) + + By("creating a job resource", func() { + _, obj, _ := ccClient.CreateArgsForCall(2) + Expect(obj).NotTo(BeNil()) + + job := obj.(*batchv1.Job) + Expect(len(job.Spec.Template.Spec.Containers)).To(Equal(2)) + + Expect(job.Spec.Template.Spec.Containers[0].Env).To(Equal([]corev1.EnvVar{ + { + Name: "DUMMY_ENV_NAME", + Value: "DUMMY_ENV_VALUE", + }, + })) + + Expect(job.Spec.Template.Spec.Containers[0].VolumeMounts).To(ContainElements([]corev1.VolumeMount{ + { + Name: "hsmcrypto", + MountPath: "/hsm", + }, + { + Name: "hsmconfig", + MountPath: "/etc/Chrystoki.conf", + SubPath: "Chrystoki.conf", + }, + { + Name: fmt.Sprintf("%s-pvc-volume", instance.GetName()), + MountPath: "/pvc/mount/path", + }, + })) + + Expect(job.Spec.Template.Spec.Containers[1].Env).To(Equal([]corev1.EnvVar{ + { + Name: "DAEMON_ENV_NAME", + Value: "DAEMON_ENV_VALUE", + }, + })) + + Expect(job.Spec.Template.Spec.Containers[1].VolumeMounts).To(ContainElements([]corev1.VolumeMount{ + { + Name: "shared", + MountPath: "/shared", + }, + { + Name: "hsmcrypto", + MountPath: "/hsm", + }, + { + Name: "hsmconfig", + MountPath: "/etc/Chrystoki.conf", + SubPath: "Chrystoki.conf", + }, + { + Name: fmt.Sprintf("%s-pvc-volume", instance.GetName()), + MountPath: "/pvc/mount/path", + }, + })) + + Expect(job.Spec.Template.Spec.Volumes).To(ContainElements([]corev1.Volume{ + { + Name: fmt.Sprintf("%s-pvc-volume", instance.GetName()), + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "test-pvc", + }, + }, + }, + { + Name: "shared", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{ + Medium: corev1.StorageMediumMemory, + }, + }, + }, + { + Name: "hsmconfig", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "hsmcrypto", + }, + }, + }, + { + Name: "hsmcrypto", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: 
"hsmcrypto", + Items: []corev1.KeyToPath{ + { + Key: "cafile.pem", + Path: "cafile.pem", + }, + }, + }, + }, + }, + })) + }) + + By("deleting completed job", func() { + // One delete to clean up ca config map before starting job + // Second delete to delete job + // Third delete to delete associated pod + // Fourth delete to delete root tls secret + // Fifth delete to delete ca config map + Expect(ccClient.DeleteCallCount()).To(Equal(5)) + }) + + By("setting controller reference on resources created by enroll job", func() { + Expect(ccClient.UpdateCallCount()).To(Equal(4)) + }) + }) + }) + + // TODO: Add more tests for error path testing +}) diff --git a/pkg/initializer/common/enroller/hsmenroller.go b/pkg/initializer/common/enroller/hsmenroller.go new file mode 100644 index 00000000..18f51f2d --- /dev/null +++ b/pkg/initializer/common/enroller/hsmenroller.go @@ -0,0 +1,456 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package enroller + +import ( + "context" + "fmt" + "path/filepath" + "time" + + "github.com/hyperledger/fabric-ca/lib" + "github.com/pkg/errors" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + jobv1 "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/job" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" +) + +//go:generate counterfeiter -o mocks/instance.go -fake-name Instance . Instance +type Instance interface { + metav1.Object + EnrollerImage() string + GetPullSecrets() []corev1.LocalObjectReference + PVCName() string + GetResource(current.Component) corev1.ResourceRequirements +} + +//go:generate counterfeiter -o mocks/hsmcaclient.go -fake-name HSMCAClient . 
HSMCAClient +type HSMCAClient interface { + GetEnrollmentRequest() *current.Enrollment + GetHomeDir() string + PingCA(time.Duration) error + SetHSMLibrary(string) + GetConfig() *lib.ClientConfig +} + +type HSMEnrollJobTimeouts struct { + JobStart common.Duration `json:"jobStart" yaml:"jobStart"` + JobCompletion common.Duration `json:"jobCompletion" yaml:"jobCompletion"` +} + +type HSMEnroller struct { + CAClient HSMCAClient + Client k8sclient.Client + Instance Instance + Timeouts HSMEnrollJobTimeouts + Scheme *runtime.Scheme + Config *config.HSMConfig +} + +func NewHSMEnroller(cfg *current.Enrollment, instance Instance, caclient HSMCAClient, client k8sclient.Client, scheme *runtime.Scheme, timeouts HSMEnrollJobTimeouts, hsmConfig *config.HSMConfig) *HSMEnroller { + return &HSMEnroller{ + CAClient: caclient, + Client: client, + Instance: instance, + Scheme: scheme, + Timeouts: timeouts, + Config: hsmConfig, + } +} + +func (e *HSMEnroller) GetEnrollmentRequest() *current.Enrollment { + return e.CAClient.GetEnrollmentRequest() +} + +func (e *HSMEnroller) ReadKey() ([]byte, error) { + return nil, nil +} + +func (e *HSMEnroller) PingCA(timeout time.Duration) error { + return e.CAClient.PingCA(timeout) +} + +func (e *HSMEnroller) Enroll() (*config.Response, error) { + // Deleting CA client config is an unfortunate requirement since the ca client + // config map was not properly deleted after a successfull reenrollment request. + // This is problematic when recreating a resource with same name, as it will + // try to use old settings in the config map, which might no longer apply, thus + // it must be removed if found before proceeding. + if err := deleteCAClientConfig(e.Client, e.Instance); err != nil { + return nil, err + } + + e.CAClient.SetHSMLibrary(filepath.Join("/hsm/lib", filepath.Base(e.Config.Library.FilePath))) + if err := createRootTLSSecret(e.Client, e.CAClient, e.Scheme, e.Instance); err != nil { + return nil, err + } + + if err := createCAClientConfig(e.Client, e.CAClient, e.Scheme, e.Instance); err != nil { + return nil, err + } + + job := e.initHSMJob(e.Instance, e.Timeouts) + if err := e.Client.Create(context.TODO(), job.Job, k8sclient.CreateOption{ + Owner: e.Instance, + Scheme: e.Scheme, + }); err != nil { + return nil, errors.Wrap(err, "failed to create HSM ca initialization job") + } + log.Info(fmt.Sprintf("Job '%s' created", job.GetName())) + + if err := job.WaitUntilActive(e.Client); err != nil { + return nil, err + } + log.Info(fmt.Sprintf("Job '%s' active", job.GetName())) + + if err := job.WaitUntilFinished(e.Client); err != nil { + return nil, err + } + log.Info(fmt.Sprintf("Job '%s' finished", job.GetName())) + + status, err := job.Status(e.Client) + if err != nil { + return nil, err + } + + switch status { + case jobv1.FAILED: + return nil, fmt.Errorf("Job '%s' finished unsuccessfully, not cleaning up pods to allow for error evaluation", job.GetName()) + case jobv1.COMPLETED: + if err := job.Delete(e.Client); err != nil { + return nil, err + } + + if err := deleteRootTLSSecret(e.Client, e.Instance); err != nil { + return nil, err + } + + if err := deleteCAClientConfig(e.Client, e.Instance); err != nil { + return nil, err + } + } + + name := fmt.Sprintf("ecert-%s-signcert", e.Instance.GetName()) + err = wait.Poll(2*time.Second, 30*time.Second, func() (bool, error) { + sec := &corev1.Secret{} + log.Info(fmt.Sprintf("Waiting for secret '%s' to be created", name)) + err = e.Client.Get(context.TODO(), types.NamespacedName{ + Name: name, + Namespace: 
e.Instance.GetNamespace(), + }, sec) + if err != nil { + return false, nil + } + + return true, nil + }) + if err != nil { + return nil, fmt.Errorf("failed to create secret '%s'", name) + } + + if err := setControllerReferences(e.Client, e.Scheme, e.Instance); err != nil { + return nil, err + } + + return &config.Response{}, nil +} + +func setControllerReferences(client k8sclient.Client, scheme *runtime.Scheme, instance Instance) error { + if err := setControllerReferenceFor(fmt.Sprintf("ecert-%s-signcert", instance.GetName()), false, client, scheme, instance); err != nil { + return err + } + + if err := setControllerReferenceFor(fmt.Sprintf("ecert-%s-cacerts", instance.GetName()), false, client, scheme, instance); err != nil { + return err + } + + if err := setControllerReferenceFor(fmt.Sprintf("ecert-%s-admincerts", instance.GetName()), true, client, scheme, instance); err != nil { + return err + } + + if err := setControllerReferenceFor(fmt.Sprintf("ecert-%s-intercerts", instance.GetName()), true, client, scheme, instance); err != nil { + return err + } + + return nil +} + +func setControllerReferenceFor(name string, skipIfNotFound bool, client k8sclient.Client, scheme *runtime.Scheme, instance Instance) error { + nn := types.NamespacedName{ + Name: name, + Namespace: instance.GetNamespace(), + } + + sec := &corev1.Secret{} + if err := client.Get(context.TODO(), nn, sec); err != nil { + if skipIfNotFound { + return nil + } + + return err + } + + if err := client.Update(context.TODO(), sec, k8sclient.UpdateOption{ + Owner: instance, + Scheme: scheme, + }); err != nil { + return errors.Wrapf(err, "failed to update secret '%s' with controller reference", instance.GetName()) + } + + return nil +} + +func createRootTLSSecret(client k8sclient.Client, caClient HSMCAClient, scheme *runtime.Scheme, instance Instance) error { + tlsCertBytes, err := caClient.GetEnrollmentRequest().GetCATLSBytes() + if err != nil { + return err + } + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-init-roottls", instance.GetName()), + Namespace: instance.GetNamespace(), + }, + Data: map[string][]byte{ + "tlsCert.pem": tlsCertBytes, + }, + } + + if err := client.Create(context.TODO(), secret, k8sclient.CreateOption{ + Owner: instance, + Scheme: scheme, + }); err != nil { + return errors.Wrap(err, "failed to create secret") + } + + return nil +} + +func deleteRootTLSSecret(client k8sclient.Client, instance Instance) error { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-init-roottls", instance.GetName()), + Namespace: instance.GetNamespace(), + }, + } + + if err := client.Delete(context.TODO(), secret); err != nil { + return errors.Wrap(err, "failed to delete secret") + } + + return nil +} + +func createCAClientConfig(client k8sclient.Client, caClient HSMCAClient, scheme *runtime.Scheme, instance Instance) error { + configBytes, err := yaml.Marshal(caClient.GetConfig()) + if err != nil { + return err + } + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-init-config", instance.GetName()), + Namespace: instance.GetNamespace(), + }, + BinaryData: map[string][]byte{ + "fabric-ca-client-config.yaml": configBytes, + }, + } + + if err := client.Create(context.TODO(), cm, k8sclient.CreateOption{ + Owner: instance, + Scheme: scheme, + }); err != nil { + return errors.Wrap(err, "failed to create config map") + } + + return nil +} + +func deleteCAClientConfig(k8sClient k8sclient.Client, instance Instance) error { + cm 
:= &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-init-config", instance.GetName()), + Namespace: instance.GetNamespace(), + }, + } + + if err := k8sClient.Delete(context.TODO(), cm); client.IgnoreNotFound(err) != nil { + return errors.Wrap(err, "failed to delete config map") + } + + return nil +} + +func (e *HSMEnroller) initHSMJob(instance Instance, timeouts HSMEnrollJobTimeouts) *jobv1.Job { + hsmConfig := e.Config + req := e.CAClient.GetEnrollmentRequest() + + hsmLibraryPath := hsmConfig.Library.FilePath + hsmLibraryName := filepath.Base(hsmLibraryPath) + + jobName := fmt.Sprintf("%s-enroll", instance.GetName()) + + f := false + user := int64(0) + backoffLimit := int32(0) + mountPath := "/shared" + + k8sJob := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: jobName, + Namespace: instance.GetNamespace(), + Labels: map[string]string{ + "name": jobName, + "owner": instance.GetName(), + }, + }, + Spec: batchv1.JobSpec{ + BackoffLimit: &backoffLimit, + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + ServiceAccountName: instance.GetName(), + ImagePullSecrets: util.AppendImagePullSecretIfMissing(instance.GetPullSecrets(), hsmConfig.BuildPullSecret()), + RestartPolicy: corev1.RestartPolicyNever, + InitContainers: []corev1.Container{ + corev1.Container{ + Name: "hsm-client", + Image: hsmConfig.Library.Image, + ImagePullPolicy: corev1.PullAlways, + Command: []string{ + "sh", + "-c", + fmt.Sprintf("mkdir -p %s/hsm && dst=\"%s/hsm/%s\" && echo \"Copying %s to ${dst}\" && mkdir -p $(dirname $dst) && cp -r %s $dst", mountPath, mountPath, hsmLibraryName, hsmLibraryPath, hsmLibraryPath), + }, + SecurityContext: &corev1.SecurityContext{ + RunAsUser: &user, + RunAsNonRoot: &f, + }, + VolumeMounts: []corev1.VolumeMount{ + corev1.VolumeMount{ + Name: "shared", + MountPath: mountPath, + }, + }, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("0.1"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + corev1.ResourceEphemeralStorage: resource.MustParse("100Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("500Mi"), + corev1.ResourceEphemeralStorage: resource.MustParse("1Gi"), + }, + }, + }, + }, + Containers: []corev1.Container{ + corev1.Container{ + Name: "init", + Image: instance.EnrollerImage(), + ImagePullPolicy: corev1.PullAlways, + SecurityContext: &corev1.SecurityContext{ + RunAsUser: &user, + RunAsNonRoot: &f, + }, + Env: hsmConfig.GetEnvs(), + Command: []string{ + "sh", + "-c", + fmt.Sprintf("/usr/local/bin/enroller node enroll %s %s %s %s %s %s %s %s %s", e.CAClient.GetHomeDir(), "/tmp/fabric-ca-client-config.yaml", req.CAHost, req.CAPort, req.CAName, instance.GetName(), instance.GetNamespace(), req.EnrollID, req.EnrollSecret), + }, + VolumeMounts: []corev1.VolumeMount{ + corev1.VolumeMount{ + Name: "tlscertfile", + MountPath: fmt.Sprintf("%s/tlsCert.pem", e.CAClient.GetHomeDir()), + SubPath: "tlsCert.pem", + }, + corev1.VolumeMount{ + Name: "clientconfig", + MountPath: fmt.Sprintf("/tmp/%s", "fabric-ca-client-config.yaml"), + SubPath: "fabric-ca-client-config.yaml", + }, + corev1.VolumeMount{ + Name: "shared", + MountPath: "/hsm/lib", + SubPath: "hsm", + }, + }, + }, + }, + Volumes: []corev1.Volume{ + corev1.Volume{ + Name: "shared", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{ + Medium: corev1.StorageMediumMemory, + }, + }, + }, + corev1.Volume{ + Name: 
"tlscertfile", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: fmt.Sprintf("%s-init-roottls", instance.GetName()), + }, + }, + }, + corev1.Volume{ + Name: "clientconfig", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: fmt.Sprintf("%s-init-config", instance.GetName()), + }, + }, + }, + }, + }, + }, + }, + }, + } + + job := jobv1.New(k8sJob, &jobv1.Timeouts{ + WaitUntilActive: timeouts.JobStart.Get(), + WaitUntilFinished: timeouts.JobCompletion.Get(), + }) + + job.Spec.Template.Spec.Volumes = append(job.Spec.Template.Spec.Volumes, hsmConfig.GetVolumes()...) + job.Spec.Template.Spec.Containers[0].VolumeMounts = append(job.Spec.Template.Spec.Containers[0].VolumeMounts, hsmConfig.GetVolumeMounts()...) + + return job +} diff --git a/pkg/initializer/common/enroller/hsmenroller_test.go b/pkg/initializer/common/enroller/hsmenroller_test.go new file mode 100644 index 00000000..808bba64 --- /dev/null +++ b/pkg/initializer/common/enroller/hsmenroller_test.go @@ -0,0 +1,293 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package enroller_test + +import ( + "context" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/pkg/errors" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + ccmocks "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/enroller" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/enroller/mocks" + + k8sclient "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = Describe("HSM sidecar enroller", func() { + var ( + e *enroller.HSMEnroller + ccClient *ccmocks.Client + hsmcaClient *mocks.HSMCAClient + instance *mocks.Instance + ) + + BeforeEach(func() { + instance = &mocks.Instance{} + instance.GetNameReturns("test") + + ccClient = &ccmocks.Client{ + GetStub: func(ctx context.Context, nn types.NamespacedName, obj k8sclient.Object) error { + switch obj.(type) { + case *batchv1.Job: + j := obj.(*batchv1.Job) + j.Status.Active = int32(1) + j.Name = "test-job" + } + return nil + }, + ListStub: func(ctx context.Context, obj k8sclient.ObjectList, opts ...k8sclient.ListOption) error { + switch obj.(type) { + case *corev1.PodList: + p := obj.(*corev1.PodList) + p.Items = []corev1.Pod{{ + Status: corev1.PodStatus{ + ContainerStatuses: []corev1.ContainerStatus{ + { + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{}, + }, + }, + }, + Phase: corev1.PodSucceeded, + }, + }} + } + return nil + }, + } + + hsmcaClient = &mocks.HSMCAClient{} + hsmcaClient.GetEnrollmentRequestReturns(¤t.Enrollment{ + CATLS: ¤t.CATLS{ + CACert: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNGakNDQWIyZ0F3SUJBZ0lVZi84bk94M2NqM1htVzNDSUo1L0Q1ejRRcUVvd0NnWUlLb1pJemowRUF3SXcKYURFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJrd0Z3WURWUVFERXhCbVlXSnlhV010ClkyRXRjMlZ5ZG1WeU1CNFhEVEU1TVRBek1ERTNNamd3TUZvWERUTTBNVEF5TmpFM01qZ3dNRm93YURFTE1Ba0cKQTFVRUJoTUNWVk14RnpBVkJnTlZCQWdURGs1dmNuUm9JRU5oY205c2FXNWhNUlF3RWdZRFZRUUtFd3RJZVhCbApjbXhsWkdkbGNqRVBNQTBHQTFVRUN4TUdSbUZpY21sak1Sa3dGd1lEVlFRREV4Qm1ZV0p5YVdNdFkyRXRjMlZ5CmRtVnlNRmt3RXdZSEtvWkl6ajBDQVFZSUtvWkl6ajBEQVFjRFFnQUVSbzNmbUc2UHkyUHd6cUMwNnFWZDlFOFgKZ044eldqZzFMb3lnMmsxdkQ4MXY1dENRRytCTVozSUJGQnI2VTRhc0tZTUREakd6TElERmdUUTRjVDd1VktORgpNRU13RGdZRFZSMFBBUUgvQkFRREFnRUdNQklHQTFVZEV3RUIvd1FJTUFZQkFmOENBUUV3SFFZRFZSME9CQllFCkZFa0RtUHhjbTdGcXZSMXllN0tNNGdLLy9KZ1JNQW9HQ0NxR1NNNDlCQU1DQTBjQU1FUUNJRC92QVFVSEh2SWwKQWZZLzM5UWdEU2ltTWpMZnhPTG44NllyR1EvWHpkQVpBaUFpUmlyZmlMdzVGbXBpRDhtYmlmRjV4bzdFUzdqNApaUWQyT0FUNCt5OWE0Zz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K", + }, + }) + + hsmConfig := &config.HSMConfig{ + Type: "hsm", + Version: "v1", + Library: config.Library{ + FilePath: "/usr/lib/libCryptoki2_64.so", + Image: "ghcr.io/ibm-blockchain/ibp-pkcs11-proxy/gemalto-client:skarim-amd64", + Auth: &config.Auth{ + ImagePullSecret: "hsmpullsecret", + }, + }, + Envs: []corev1.EnvVar{ + { + Name: "DUMMY_ENV_NAME", + Value: "DUMMY_ENV_VALUE", + }, + }, + MountPaths: []config.MountPath{ + { + Name: "hsmcrypto", + Secret: "hsmcrypto", + MountPath: "/hsm", + Paths: []config.Path{ + { + Key: "cafile.pem", + Path: "cafile.pem", + }, + }, + }, + { + Name: "hsmconfig", + Secret: "hsmcrypto", + MountPath: "/etc/Chrystoki.conf", + SubPath: "Chrystoki.conf", + }, + }, + } + + e = &enroller.HSMEnroller{ + Config: 
hsmConfig, + Client: ccClient, + Instance: instance, + CAClient: hsmcaClient, + Timeouts: enroller.HSMEnrollJobTimeouts{ + JobStart: common.MustParseDuration("1s"), + JobCompletion: common.MustParseDuration("1s"), + }, + } + }) + + Context("enroll", func() { + It("returns error if creating ca crypto secret fails", func() { + ccClient.CreateReturnsOnCall(0, errors.New("failed to create root TLS secret")) + _, err := e.Enroll() + Expect(err).To(MatchError(ContainSubstring("failed to create root TLS secret"))) + }) + + It("returns error if creating ca config map fails", func() { + ccClient.CreateReturnsOnCall(1, errors.New("failed to create ca config map")) + _, err := e.Enroll() + Expect(err).To(MatchError(ContainSubstring("failed to create ca config map"))) + }) + + It("returns error if creating job fails", func() { + ccClient.CreateReturnsOnCall(2, errors.New("failed to create job")) + _, err := e.Enroll() + Expect(err).To(MatchError(ContainSubstring("failed to create job"))) + }) + + Context("job start timeout", func() { + BeforeEach(func() { + ccClient.GetStub = func(ctx context.Context, nn types.NamespacedName, obj k8sclient.Object) error { + switch obj.(type) { + case *batchv1.Job: + j := obj.(*batchv1.Job) + j.Status.Active = int32(0) + j.Name = "test-job" + + } + return nil + } + }) + + It("returns error if job doesn't start before timeout", func() { + _, err := e.Enroll() + Expect(err).To(MatchError(ContainSubstring("job failed to start"))) + }) + }) + + Context("job fails", func() { + When("job timesout", func() { + BeforeEach(func() { + ccClient.ListStub = func(ctx context.Context, obj k8sclient.ObjectList, opts ...k8sclient.ListOption) error { + switch obj.(type) { + case *corev1.PodList: + p := obj.(*corev1.PodList) + p.Items = []corev1.Pod{} + } + return nil + } + }) + + It("returns error", func() { + _, err := e.Enroll() + Expect(err).To(MatchError(ContainSubstring("failed to finish"))) + }) + }) + + When("pod enters failed state", func() { + BeforeEach(func() { + ccClient.ListStub = func(ctx context.Context, obj k8sclient.ObjectList, opts ...k8sclient.ListOption) error { + switch obj.(type) { + case *corev1.PodList: + p := obj.(*corev1.PodList) + p.Items = []corev1.Pod{{ + Status: corev1.PodStatus{ + Phase: corev1.PodFailed, + }, + }} + } + return nil + } + }) + + It("returns error", func() { + _, err := e.Enroll() + Expect(err).To(MatchError(ContainSubstring("finished unsuccessfully, not cleaning up pods to allow for error"))) + }) + }) + }) + + It("returns no error on successfull enroll", func() { + resp, err := e.Enroll() + Expect(err).NotTo(HaveOccurred()) + Expect(resp).NotTo(BeNil()) + + By("creating a job resource", func() { + _, obj, _ := ccClient.CreateArgsForCall(2) + Expect(obj).NotTo(BeNil()) + + job := obj.(*batchv1.Job) + Expect(job.Spec.Template.Spec.Containers[0].Env).To(Equal([]corev1.EnvVar{ + { + Name: "DUMMY_ENV_NAME", + Value: "DUMMY_ENV_VALUE", + }, + })) + + Expect(job.Spec.Template.Spec.Containers[0].VolumeMounts).To(ContainElements([]corev1.VolumeMount{ + { + Name: "hsmcrypto", + MountPath: "/hsm", + }, + { + Name: "hsmconfig", + MountPath: "/etc/Chrystoki.conf", + SubPath: "Chrystoki.conf", + }, + })) + + Expect(job.Spec.Template.Spec.Volumes).To(ContainElements([]corev1.Volume{ + { + Name: "hsmconfig", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "hsmcrypto", + }, + }, + }, + { + Name: "hsmcrypto", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "hsmcrypto", + 
Items: []corev1.KeyToPath{ + { + Key: "cafile.pem", + Path: "cafile.pem", + }, + }, + }, + }, + }, + })) + }) + + By("deleting completed job", func() { + // One delete to clean up ca config map before starting job + // Second delete to delete job + // Third delete to delete associated pod + // Fourth delete to delete root tls secret + // Fifth delete to delete ca config map + Expect(ccClient.DeleteCallCount()).To(Equal(5)) + }) + + By("setting controller reference on resources created by enroll job", func() { + Expect(ccClient.UpdateCallCount()).To(Equal(4)) + }) + }) + }) + + // TODO: Add more tests for error path testing +}) diff --git a/pkg/initializer/common/enroller/hsmproxyenroller.go b/pkg/initializer/common/enroller/hsmproxyenroller.go new file mode 100644 index 00000000..5b659a59 --- /dev/null +++ b/pkg/initializer/common/enroller/hsmproxyenroller.go @@ -0,0 +1,62 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package enroller + +import ( + "time" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + "github.com/hyperledger/fabric-ca/api" + "github.com/hyperledger/fabric-ca/lib" +) + +type HSMProxyCAClient interface { + Init() error + Enroll(*api.EnrollmentRequest) (*lib.EnrollmentResponse, error) + GetEnrollmentRequest() *current.Enrollment + GetHomeDir() string + GetTLSCert() []byte + PingCA(time.Duration) error + SetHSMLibrary(string) +} + +type HSMProxyEnroller struct { + Client HSMProxyCAClient + Req *current.Enrollment +} + +func NewHSMProxyEnroller(caClient HSMProxyCAClient) *HSMProxyEnroller { + return &HSMProxyEnroller{ + Client: caClient, + } +} + +func (e *HSMProxyEnroller) GetEnrollmentRequest() *current.Enrollment { + return e.Client.GetEnrollmentRequest() +} + +func (e *HSMProxyEnroller) PingCA(timeout time.Duration) error { + return e.Client.PingCA(timeout) +} + +func (e *HSMProxyEnroller) Enroll() (*config.Response, error) { + e.Client.SetHSMLibrary("/usr/local/lib/libpkcs11-proxy.so") + return enroll(e.Client) +} diff --git a/pkg/initializer/common/enroller/mocks/caclient.go b/pkg/initializer/common/enroller/mocks/caclient.go new file mode 100644 index 00000000..5eaacdfd --- /dev/null +++ b/pkg/initializer/common/enroller/mocks/caclient.go @@ -0,0 +1,454 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package mocks + +import ( + "sync" + "time" + + "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/enroller" + "github.com/hyperledger/fabric-ca/api" + "github.com/hyperledger/fabric-ca/lib" +) + +type CAClient struct { + EnrollStub func(*api.EnrollmentRequest) (*lib.EnrollmentResponse, error) + enrollMutex sync.RWMutex + enrollArgsForCall []struct { + arg1 *api.EnrollmentRequest + } + enrollReturns struct { + result1 *lib.EnrollmentResponse + result2 error + } + enrollReturnsOnCall map[int]struct { + result1 *lib.EnrollmentResponse + result2 error + } + GetEnrollmentRequestStub func() *v1beta1.Enrollment + getEnrollmentRequestMutex sync.RWMutex + getEnrollmentRequestArgsForCall []struct { + } + getEnrollmentRequestReturns struct { + result1 *v1beta1.Enrollment + } + getEnrollmentRequestReturnsOnCall map[int]struct { + result1 *v1beta1.Enrollment + } + GetHomeDirStub func() string + getHomeDirMutex sync.RWMutex + getHomeDirArgsForCall []struct { + } + getHomeDirReturns struct { + result1 string + } + getHomeDirReturnsOnCall map[int]struct { + result1 string + } + GetTLSCertStub func() []byte + getTLSCertMutex sync.RWMutex + getTLSCertArgsForCall []struct { + } + getTLSCertReturns struct { + result1 []byte + } + getTLSCertReturnsOnCall map[int]struct { + result1 []byte + } + InitStub func() error + initMutex sync.RWMutex + initArgsForCall []struct { + } + initReturns struct { + result1 error + } + initReturnsOnCall map[int]struct { + result1 error + } + PingCAStub func(time.Duration) error + pingCAMutex sync.RWMutex + pingCAArgsForCall []struct { + arg1 time.Duration + } + pingCAReturns struct { + result1 error + } + pingCAReturnsOnCall map[int]struct { + result1 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *CAClient) Enroll(arg1 *api.EnrollmentRequest) (*lib.EnrollmentResponse, error) { + fake.enrollMutex.Lock() + ret, specificReturn := fake.enrollReturnsOnCall[len(fake.enrollArgsForCall)] + fake.enrollArgsForCall = append(fake.enrollArgsForCall, struct { + arg1 *api.EnrollmentRequest + }{arg1}) + stub := fake.EnrollStub + fakeReturns := fake.enrollReturns + fake.recordInvocation("Enroll", []interface{}{arg1}) + fake.enrollMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *CAClient) EnrollCallCount() int { + fake.enrollMutex.RLock() + defer fake.enrollMutex.RUnlock() + return len(fake.enrollArgsForCall) +} + +func (fake *CAClient) EnrollCalls(stub func(*api.EnrollmentRequest) (*lib.EnrollmentResponse, error)) { + fake.enrollMutex.Lock() + defer fake.enrollMutex.Unlock() + fake.EnrollStub = stub +} + +func (fake *CAClient) EnrollArgsForCall(i int) *api.EnrollmentRequest { + fake.enrollMutex.RLock() + defer fake.enrollMutex.RUnlock() + argsForCall := fake.enrollArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *CAClient) EnrollReturns(result1 *lib.EnrollmentResponse, result2 error) { + fake.enrollMutex.Lock() + defer fake.enrollMutex.Unlock() + fake.EnrollStub = nil + fake.enrollReturns = struct { + result1 *lib.EnrollmentResponse + result2 error + }{result1, result2} +} + +func (fake *CAClient) EnrollReturnsOnCall(i int, result1 *lib.EnrollmentResponse, result2 error) { + fake.enrollMutex.Lock() + defer fake.enrollMutex.Unlock() + fake.EnrollStub = nil + if fake.enrollReturnsOnCall == nil { + fake.enrollReturnsOnCall = 
make(map[int]struct { + result1 *lib.EnrollmentResponse + result2 error + }) + } + fake.enrollReturnsOnCall[i] = struct { + result1 *lib.EnrollmentResponse + result2 error + }{result1, result2} +} + +func (fake *CAClient) GetEnrollmentRequest() *v1beta1.Enrollment { + fake.getEnrollmentRequestMutex.Lock() + ret, specificReturn := fake.getEnrollmentRequestReturnsOnCall[len(fake.getEnrollmentRequestArgsForCall)] + fake.getEnrollmentRequestArgsForCall = append(fake.getEnrollmentRequestArgsForCall, struct { + }{}) + stub := fake.GetEnrollmentRequestStub + fakeReturns := fake.getEnrollmentRequestReturns + fake.recordInvocation("GetEnrollmentRequest", []interface{}{}) + fake.getEnrollmentRequestMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *CAClient) GetEnrollmentRequestCallCount() int { + fake.getEnrollmentRequestMutex.RLock() + defer fake.getEnrollmentRequestMutex.RUnlock() + return len(fake.getEnrollmentRequestArgsForCall) +} + +func (fake *CAClient) GetEnrollmentRequestCalls(stub func() *v1beta1.Enrollment) { + fake.getEnrollmentRequestMutex.Lock() + defer fake.getEnrollmentRequestMutex.Unlock() + fake.GetEnrollmentRequestStub = stub +} + +func (fake *CAClient) GetEnrollmentRequestReturns(result1 *v1beta1.Enrollment) { + fake.getEnrollmentRequestMutex.Lock() + defer fake.getEnrollmentRequestMutex.Unlock() + fake.GetEnrollmentRequestStub = nil + fake.getEnrollmentRequestReturns = struct { + result1 *v1beta1.Enrollment + }{result1} +} + +func (fake *CAClient) GetEnrollmentRequestReturnsOnCall(i int, result1 *v1beta1.Enrollment) { + fake.getEnrollmentRequestMutex.Lock() + defer fake.getEnrollmentRequestMutex.Unlock() + fake.GetEnrollmentRequestStub = nil + if fake.getEnrollmentRequestReturnsOnCall == nil { + fake.getEnrollmentRequestReturnsOnCall = make(map[int]struct { + result1 *v1beta1.Enrollment + }) + } + fake.getEnrollmentRequestReturnsOnCall[i] = struct { + result1 *v1beta1.Enrollment + }{result1} +} + +func (fake *CAClient) GetHomeDir() string { + fake.getHomeDirMutex.Lock() + ret, specificReturn := fake.getHomeDirReturnsOnCall[len(fake.getHomeDirArgsForCall)] + fake.getHomeDirArgsForCall = append(fake.getHomeDirArgsForCall, struct { + }{}) + stub := fake.GetHomeDirStub + fakeReturns := fake.getHomeDirReturns + fake.recordInvocation("GetHomeDir", []interface{}{}) + fake.getHomeDirMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *CAClient) GetHomeDirCallCount() int { + fake.getHomeDirMutex.RLock() + defer fake.getHomeDirMutex.RUnlock() + return len(fake.getHomeDirArgsForCall) +} + +func (fake *CAClient) GetHomeDirCalls(stub func() string) { + fake.getHomeDirMutex.Lock() + defer fake.getHomeDirMutex.Unlock() + fake.GetHomeDirStub = stub +} + +func (fake *CAClient) GetHomeDirReturns(result1 string) { + fake.getHomeDirMutex.Lock() + defer fake.getHomeDirMutex.Unlock() + fake.GetHomeDirStub = nil + fake.getHomeDirReturns = struct { + result1 string + }{result1} +} + +func (fake *CAClient) GetHomeDirReturnsOnCall(i int, result1 string) { + fake.getHomeDirMutex.Lock() + defer fake.getHomeDirMutex.Unlock() + fake.GetHomeDirStub = nil + if fake.getHomeDirReturnsOnCall == nil { + fake.getHomeDirReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getHomeDirReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *CAClient) GetTLSCert() []byte { + 
fake.getTLSCertMutex.Lock() + ret, specificReturn := fake.getTLSCertReturnsOnCall[len(fake.getTLSCertArgsForCall)] + fake.getTLSCertArgsForCall = append(fake.getTLSCertArgsForCall, struct { + }{}) + stub := fake.GetTLSCertStub + fakeReturns := fake.getTLSCertReturns + fake.recordInvocation("GetTLSCert", []interface{}{}) + fake.getTLSCertMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *CAClient) GetTLSCertCallCount() int { + fake.getTLSCertMutex.RLock() + defer fake.getTLSCertMutex.RUnlock() + return len(fake.getTLSCertArgsForCall) +} + +func (fake *CAClient) GetTLSCertCalls(stub func() []byte) { + fake.getTLSCertMutex.Lock() + defer fake.getTLSCertMutex.Unlock() + fake.GetTLSCertStub = stub +} + +func (fake *CAClient) GetTLSCertReturns(result1 []byte) { + fake.getTLSCertMutex.Lock() + defer fake.getTLSCertMutex.Unlock() + fake.GetTLSCertStub = nil + fake.getTLSCertReturns = struct { + result1 []byte + }{result1} +} + +func (fake *CAClient) GetTLSCertReturnsOnCall(i int, result1 []byte) { + fake.getTLSCertMutex.Lock() + defer fake.getTLSCertMutex.Unlock() + fake.GetTLSCertStub = nil + if fake.getTLSCertReturnsOnCall == nil { + fake.getTLSCertReturnsOnCall = make(map[int]struct { + result1 []byte + }) + } + fake.getTLSCertReturnsOnCall[i] = struct { + result1 []byte + }{result1} +} + +func (fake *CAClient) Init() error { + fake.initMutex.Lock() + ret, specificReturn := fake.initReturnsOnCall[len(fake.initArgsForCall)] + fake.initArgsForCall = append(fake.initArgsForCall, struct { + }{}) + stub := fake.InitStub + fakeReturns := fake.initReturns + fake.recordInvocation("Init", []interface{}{}) + fake.initMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *CAClient) InitCallCount() int { + fake.initMutex.RLock() + defer fake.initMutex.RUnlock() + return len(fake.initArgsForCall) +} + +func (fake *CAClient) InitCalls(stub func() error) { + fake.initMutex.Lock() + defer fake.initMutex.Unlock() + fake.InitStub = stub +} + +func (fake *CAClient) InitReturns(result1 error) { + fake.initMutex.Lock() + defer fake.initMutex.Unlock() + fake.InitStub = nil + fake.initReturns = struct { + result1 error + }{result1} +} + +func (fake *CAClient) InitReturnsOnCall(i int, result1 error) { + fake.initMutex.Lock() + defer fake.initMutex.Unlock() + fake.InitStub = nil + if fake.initReturnsOnCall == nil { + fake.initReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.initReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *CAClient) PingCA(arg1 time.Duration) error { + fake.pingCAMutex.Lock() + ret, specificReturn := fake.pingCAReturnsOnCall[len(fake.pingCAArgsForCall)] + fake.pingCAArgsForCall = append(fake.pingCAArgsForCall, struct { + arg1 time.Duration + }{arg1}) + stub := fake.PingCAStub + fakeReturns := fake.pingCAReturns + fake.recordInvocation("PingCA", []interface{}{arg1}) + fake.pingCAMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *CAClient) PingCACallCount() int { + fake.pingCAMutex.RLock() + defer fake.pingCAMutex.RUnlock() + return len(fake.pingCAArgsForCall) +} + +func (fake *CAClient) PingCACalls(stub func(time.Duration) error) { + fake.pingCAMutex.Lock() + defer fake.pingCAMutex.Unlock() + fake.PingCAStub = stub +} + +func (fake *CAClient) PingCAArgsForCall(i int) 
time.Duration { + fake.pingCAMutex.RLock() + defer fake.pingCAMutex.RUnlock() + argsForCall := fake.pingCAArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *CAClient) PingCAReturns(result1 error) { + fake.pingCAMutex.Lock() + defer fake.pingCAMutex.Unlock() + fake.PingCAStub = nil + fake.pingCAReturns = struct { + result1 error + }{result1} +} + +func (fake *CAClient) PingCAReturnsOnCall(i int, result1 error) { + fake.pingCAMutex.Lock() + defer fake.pingCAMutex.Unlock() + fake.PingCAStub = nil + if fake.pingCAReturnsOnCall == nil { + fake.pingCAReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.pingCAReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *CAClient) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.enrollMutex.RLock() + defer fake.enrollMutex.RUnlock() + fake.getEnrollmentRequestMutex.RLock() + defer fake.getEnrollmentRequestMutex.RUnlock() + fake.getHomeDirMutex.RLock() + defer fake.getHomeDirMutex.RUnlock() + fake.getTLSCertMutex.RLock() + defer fake.getTLSCertMutex.RUnlock() + fake.initMutex.RLock() + defer fake.initMutex.RUnlock() + fake.pingCAMutex.RLock() + defer fake.pingCAMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *CAClient) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ enroller.CAClient = new(CAClient) diff --git a/pkg/initializer/common/enroller/mocks/client.go b/pkg/initializer/common/enroller/mocks/client.go new file mode 100644 index 00000000..ee14505d --- /dev/null +++ b/pkg/initializer/common/enroller/mocks/client.go @@ -0,0 +1,746 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package mocks + +import ( + "context" + "sync" + + "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type Client struct { + CreateStub func(context.Context, client.Object, ...controllerclient.CreateOption) error + createMutex sync.RWMutex + createArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 []controllerclient.CreateOption + } + createReturns struct { + result1 error + } + createReturnsOnCall map[int]struct { + result1 error + } + CreateOrUpdateStub func(context.Context, client.Object, ...controllerclient.CreateOrUpdateOption) error + createOrUpdateMutex sync.RWMutex + createOrUpdateArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 []controllerclient.CreateOrUpdateOption + } + createOrUpdateReturns struct { + result1 error + } + createOrUpdateReturnsOnCall map[int]struct { + result1 error + } + DeleteStub func(context.Context, client.Object, ...client.DeleteOption) error + deleteMutex sync.RWMutex + deleteArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 []client.DeleteOption + } + deleteReturns struct { + result1 error + } + deleteReturnsOnCall map[int]struct { + result1 error + } + GetStub func(context.Context, types.NamespacedName, client.Object) error + getMutex sync.RWMutex + getArgsForCall []struct { + arg1 context.Context + arg2 types.NamespacedName + arg3 client.Object + } + getReturns struct { + result1 error + } + getReturnsOnCall map[int]struct { + result1 error + } + ListStub func(context.Context, client.ObjectList, ...client.ListOption) error + listMutex sync.RWMutex + listArgsForCall []struct { + arg1 context.Context + arg2 client.ObjectList + arg3 []client.ListOption + } + listReturns struct { + result1 error + } + listReturnsOnCall map[int]struct { + result1 error + } + PatchStub func(context.Context, client.Object, client.Patch, ...controllerclient.PatchOption) error + patchMutex sync.RWMutex + patchArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 client.Patch + arg4 []controllerclient.PatchOption + } + patchReturns struct { + result1 error + } + patchReturnsOnCall map[int]struct { + result1 error + } + PatchStatusStub func(context.Context, client.Object, client.Patch, ...controllerclient.PatchOption) error + patchStatusMutex sync.RWMutex + patchStatusArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 client.Patch + arg4 []controllerclient.PatchOption + } + patchStatusReturns struct { + result1 error + } + patchStatusReturnsOnCall map[int]struct { + result1 error + } + UpdateStub func(context.Context, client.Object, ...controllerclient.UpdateOption) error + updateMutex sync.RWMutex + updateArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 []controllerclient.UpdateOption + } + updateReturns struct { + result1 error + } + updateReturnsOnCall map[int]struct { + result1 error + } + UpdateStatusStub func(context.Context, client.Object, ...client.UpdateOption) error + updateStatusMutex sync.RWMutex + updateStatusArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 []client.UpdateOption + } + updateStatusReturns struct { + result1 error + } + updateStatusReturnsOnCall map[int]struct { + result1 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *Client) Create(arg1 context.Context, arg2 client.Object, arg3 ...controllerclient.CreateOption) error { + 
fake.createMutex.Lock() + ret, specificReturn := fake.createReturnsOnCall[len(fake.createArgsForCall)] + fake.createArgsForCall = append(fake.createArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 []controllerclient.CreateOption + }{arg1, arg2, arg3}) + stub := fake.CreateStub + fakeReturns := fake.createReturns + fake.recordInvocation("Create", []interface{}{arg1, arg2, arg3}) + fake.createMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) CreateCallCount() int { + fake.createMutex.RLock() + defer fake.createMutex.RUnlock() + return len(fake.createArgsForCall) +} + +func (fake *Client) CreateCalls(stub func(context.Context, client.Object, ...controllerclient.CreateOption) error) { + fake.createMutex.Lock() + defer fake.createMutex.Unlock() + fake.CreateStub = stub +} + +func (fake *Client) CreateArgsForCall(i int) (context.Context, client.Object, []controllerclient.CreateOption) { + fake.createMutex.RLock() + defer fake.createMutex.RUnlock() + argsForCall := fake.createArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) CreateReturns(result1 error) { + fake.createMutex.Lock() + defer fake.createMutex.Unlock() + fake.CreateStub = nil + fake.createReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) CreateReturnsOnCall(i int, result1 error) { + fake.createMutex.Lock() + defer fake.createMutex.Unlock() + fake.CreateStub = nil + if fake.createReturnsOnCall == nil { + fake.createReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.createReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) CreateOrUpdate(arg1 context.Context, arg2 client.Object, arg3 ...controllerclient.CreateOrUpdateOption) error { + fake.createOrUpdateMutex.Lock() + ret, specificReturn := fake.createOrUpdateReturnsOnCall[len(fake.createOrUpdateArgsForCall)] + fake.createOrUpdateArgsForCall = append(fake.createOrUpdateArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 []controllerclient.CreateOrUpdateOption + }{arg1, arg2, arg3}) + stub := fake.CreateOrUpdateStub + fakeReturns := fake.createOrUpdateReturns + fake.recordInvocation("CreateOrUpdate", []interface{}{arg1, arg2, arg3}) + fake.createOrUpdateMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) CreateOrUpdateCallCount() int { + fake.createOrUpdateMutex.RLock() + defer fake.createOrUpdateMutex.RUnlock() + return len(fake.createOrUpdateArgsForCall) +} + +func (fake *Client) CreateOrUpdateCalls(stub func(context.Context, client.Object, ...controllerclient.CreateOrUpdateOption) error) { + fake.createOrUpdateMutex.Lock() + defer fake.createOrUpdateMutex.Unlock() + fake.CreateOrUpdateStub = stub +} + +func (fake *Client) CreateOrUpdateArgsForCall(i int) (context.Context, client.Object, []controllerclient.CreateOrUpdateOption) { + fake.createOrUpdateMutex.RLock() + defer fake.createOrUpdateMutex.RUnlock() + argsForCall := fake.createOrUpdateArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) CreateOrUpdateReturns(result1 error) { + fake.createOrUpdateMutex.Lock() + defer fake.createOrUpdateMutex.Unlock() + fake.CreateOrUpdateStub = nil + fake.createOrUpdateReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) CreateOrUpdateReturnsOnCall(i int, result1 error) { + fake.createOrUpdateMutex.Lock() + defer fake.createOrUpdateMutex.Unlock() + fake.CreateOrUpdateStub = nil + if fake.createOrUpdateReturnsOnCall == nil { + fake.createOrUpdateReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.createOrUpdateReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) Delete(arg1 context.Context, arg2 client.Object, arg3 ...client.DeleteOption) error { + fake.deleteMutex.Lock() + ret, specificReturn := fake.deleteReturnsOnCall[len(fake.deleteArgsForCall)] + fake.deleteArgsForCall = append(fake.deleteArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 []client.DeleteOption + }{arg1, arg2, arg3}) + stub := fake.DeleteStub + fakeReturns := fake.deleteReturns + fake.recordInvocation("Delete", []interface{}{arg1, arg2, arg3}) + fake.deleteMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) DeleteCallCount() int { + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + return len(fake.deleteArgsForCall) +} + +func (fake *Client) DeleteCalls(stub func(context.Context, client.Object, ...client.DeleteOption) error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = stub +} + +func (fake *Client) DeleteArgsForCall(i int) (context.Context, client.Object, []client.DeleteOption) { + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + argsForCall := fake.deleteArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) DeleteReturns(result1 error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = nil + fake.deleteReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) DeleteReturnsOnCall(i int, result1 error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = nil + if fake.deleteReturnsOnCall == nil { + fake.deleteReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.deleteReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) Get(arg1 context.Context, arg2 types.NamespacedName, arg3 client.Object) error { + fake.getMutex.Lock() + ret, specificReturn := fake.getReturnsOnCall[len(fake.getArgsForCall)] + fake.getArgsForCall = append(fake.getArgsForCall, struct { + arg1 context.Context + arg2 types.NamespacedName + arg3 client.Object + }{arg1, arg2, arg3}) + stub := fake.GetStub + fakeReturns := fake.getReturns + fake.recordInvocation("Get", []interface{}{arg1, arg2, arg3}) + fake.getMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) GetCallCount() int { + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + return len(fake.getArgsForCall) +} + +func (fake *Client) GetCalls(stub func(context.Context, types.NamespacedName, client.Object) error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = stub +} + +func (fake *Client) GetArgsForCall(i int) (context.Context, types.NamespacedName, client.Object) { + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + argsForCall := fake.getArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) GetReturns(result1 error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = nil + fake.getReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) GetReturnsOnCall(i int, result1 error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = nil + if fake.getReturnsOnCall == nil { + fake.getReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.getReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) List(arg1 context.Context, arg2 client.ObjectList, arg3 ...client.ListOption) error { + fake.listMutex.Lock() + ret, specificReturn := fake.listReturnsOnCall[len(fake.listArgsForCall)] + fake.listArgsForCall = append(fake.listArgsForCall, struct { + arg1 context.Context + arg2 client.ObjectList + arg3 []client.ListOption + }{arg1, arg2, arg3}) + stub := fake.ListStub + fakeReturns := fake.listReturns + fake.recordInvocation("List", []interface{}{arg1, arg2, arg3}) + fake.listMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) ListCallCount() int { + fake.listMutex.RLock() + defer fake.listMutex.RUnlock() + return len(fake.listArgsForCall) +} + +func (fake *Client) ListCalls(stub func(context.Context, client.ObjectList, ...client.ListOption) error) { + fake.listMutex.Lock() + defer fake.listMutex.Unlock() + fake.ListStub = stub +} + +func (fake *Client) ListArgsForCall(i int) (context.Context, client.ObjectList, []client.ListOption) { + fake.listMutex.RLock() + defer fake.listMutex.RUnlock() + argsForCall := fake.listArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) ListReturns(result1 error) { + fake.listMutex.Lock() + defer fake.listMutex.Unlock() + fake.ListStub = nil + fake.listReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) ListReturnsOnCall(i int, result1 error) { + fake.listMutex.Lock() + defer fake.listMutex.Unlock() + fake.ListStub = nil + if fake.listReturnsOnCall == nil { + fake.listReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.listReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) Patch(arg1 context.Context, arg2 client.Object, arg3 client.Patch, arg4 ...controllerclient.PatchOption) error { + fake.patchMutex.Lock() + ret, specificReturn := fake.patchReturnsOnCall[len(fake.patchArgsForCall)] + fake.patchArgsForCall = append(fake.patchArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 client.Patch + arg4 []controllerclient.PatchOption + }{arg1, arg2, arg3, arg4}) + stub := fake.PatchStub + fakeReturns := fake.patchReturns + fake.recordInvocation("Patch", []interface{}{arg1, arg2, arg3, arg4}) + fake.patchMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3, arg4...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) PatchCallCount() int { + fake.patchMutex.RLock() + defer fake.patchMutex.RUnlock() + return len(fake.patchArgsForCall) +} + +func (fake *Client) PatchCalls(stub func(context.Context, client.Object, client.Patch, ...controllerclient.PatchOption) error) { + fake.patchMutex.Lock() + defer fake.patchMutex.Unlock() + fake.PatchStub = stub +} + +func (fake *Client) PatchArgsForCall(i int) (context.Context, client.Object, client.Patch, []controllerclient.PatchOption) { + fake.patchMutex.RLock() + defer fake.patchMutex.RUnlock() + argsForCall := fake.patchArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4 +} + +func (fake *Client) PatchReturns(result1 error) { + fake.patchMutex.Lock() + defer fake.patchMutex.Unlock() + fake.PatchStub = nil + fake.patchReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) PatchReturnsOnCall(i int, result1 error) { + fake.patchMutex.Lock() + defer fake.patchMutex.Unlock() + fake.PatchStub = nil + if fake.patchReturnsOnCall == nil { + fake.patchReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.patchReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) PatchStatus(arg1 context.Context, arg2 client.Object, arg3 client.Patch, arg4 ...controllerclient.PatchOption) error { + fake.patchStatusMutex.Lock() + ret, specificReturn := fake.patchStatusReturnsOnCall[len(fake.patchStatusArgsForCall)] + fake.patchStatusArgsForCall = append(fake.patchStatusArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 client.Patch + arg4 []controllerclient.PatchOption + }{arg1, arg2, arg3, arg4}) + stub := fake.PatchStatusStub + fakeReturns := fake.patchStatusReturns + fake.recordInvocation("PatchStatus", []interface{}{arg1, arg2, arg3, arg4}) + fake.patchStatusMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3, arg4...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) PatchStatusCallCount() int { + fake.patchStatusMutex.RLock() + defer fake.patchStatusMutex.RUnlock() + return len(fake.patchStatusArgsForCall) +} + +func (fake *Client) PatchStatusCalls(stub func(context.Context, client.Object, client.Patch, ...controllerclient.PatchOption) error) { + fake.patchStatusMutex.Lock() + defer fake.patchStatusMutex.Unlock() + fake.PatchStatusStub = stub +} + +func (fake *Client) PatchStatusArgsForCall(i int) (context.Context, client.Object, client.Patch, []controllerclient.PatchOption) { + fake.patchStatusMutex.RLock() + defer fake.patchStatusMutex.RUnlock() + argsForCall := fake.patchStatusArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4 +} + +func (fake *Client) PatchStatusReturns(result1 error) { + fake.patchStatusMutex.Lock() + defer fake.patchStatusMutex.Unlock() + fake.PatchStatusStub = nil + fake.patchStatusReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) PatchStatusReturnsOnCall(i int, result1 error) { + fake.patchStatusMutex.Lock() + defer fake.patchStatusMutex.Unlock() + fake.PatchStatusStub = nil + if fake.patchStatusReturnsOnCall == nil { + fake.patchStatusReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.patchStatusReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) Update(arg1 context.Context, arg2 client.Object, arg3 ...controllerclient.UpdateOption) error { + fake.updateMutex.Lock() + ret, specificReturn := fake.updateReturnsOnCall[len(fake.updateArgsForCall)] + fake.updateArgsForCall = append(fake.updateArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 []controllerclient.UpdateOption + }{arg1, arg2, arg3}) + stub := fake.UpdateStub + fakeReturns := fake.updateReturns + fake.recordInvocation("Update", []interface{}{arg1, arg2, arg3}) + fake.updateMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) UpdateCallCount() int { + fake.updateMutex.RLock() + defer fake.updateMutex.RUnlock() + return len(fake.updateArgsForCall) +} + +func (fake *Client) UpdateCalls(stub func(context.Context, client.Object, ...controllerclient.UpdateOption) error) { + fake.updateMutex.Lock() + defer fake.updateMutex.Unlock() + fake.UpdateStub = stub +} + +func (fake *Client) UpdateArgsForCall(i int) (context.Context, client.Object, []controllerclient.UpdateOption) { + fake.updateMutex.RLock() + defer fake.updateMutex.RUnlock() + argsForCall := fake.updateArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) UpdateReturns(result1 error) { + fake.updateMutex.Lock() + defer fake.updateMutex.Unlock() + fake.UpdateStub = nil + fake.updateReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) UpdateReturnsOnCall(i int, result1 error) { + fake.updateMutex.Lock() + defer fake.updateMutex.Unlock() + fake.UpdateStub = nil + if fake.updateReturnsOnCall == nil { + fake.updateReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.updateReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) UpdateStatus(arg1 context.Context, arg2 client.Object, arg3 ...client.UpdateOption) error { + fake.updateStatusMutex.Lock() + ret, specificReturn := fake.updateStatusReturnsOnCall[len(fake.updateStatusArgsForCall)] + fake.updateStatusArgsForCall = append(fake.updateStatusArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 []client.UpdateOption + }{arg1, arg2, arg3}) + stub := fake.UpdateStatusStub + fakeReturns := fake.updateStatusReturns + fake.recordInvocation("UpdateStatus", []interface{}{arg1, arg2, arg3}) + fake.updateStatusMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) UpdateStatusCallCount() int { + fake.updateStatusMutex.RLock() + defer fake.updateStatusMutex.RUnlock() + return len(fake.updateStatusArgsForCall) +} + +func (fake *Client) UpdateStatusCalls(stub func(context.Context, client.Object, ...client.UpdateOption) error) { + fake.updateStatusMutex.Lock() + defer fake.updateStatusMutex.Unlock() + fake.UpdateStatusStub = stub +} + +func (fake *Client) UpdateStatusArgsForCall(i int) (context.Context, client.Object, []client.UpdateOption) { + fake.updateStatusMutex.RLock() + defer fake.updateStatusMutex.RUnlock() + argsForCall := fake.updateStatusArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) UpdateStatusReturns(result1 error) { + fake.updateStatusMutex.Lock() + defer fake.updateStatusMutex.Unlock() + fake.UpdateStatusStub = nil + fake.updateStatusReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) UpdateStatusReturnsOnCall(i int, result1 error) { + fake.updateStatusMutex.Lock() + defer fake.updateStatusMutex.Unlock() + fake.UpdateStatusStub = nil + if fake.updateStatusReturnsOnCall == nil { + fake.updateStatusReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.updateStatusReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.createMutex.RLock() + defer fake.createMutex.RUnlock() + fake.createOrUpdateMutex.RLock() + defer fake.createOrUpdateMutex.RUnlock() + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + fake.listMutex.RLock() + defer fake.listMutex.RUnlock() + fake.patchMutex.RLock() + defer fake.patchMutex.RUnlock() + fake.patchStatusMutex.RLock() + defer fake.patchStatusMutex.RUnlock() + fake.updateMutex.RLock() + defer fake.updateMutex.RUnlock() + fake.updateStatusMutex.RLock() + defer fake.updateStatusMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *Client) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ controllerclient.Client = new(Client) diff --git a/pkg/initializer/common/enroller/mocks/cryptoenroller.go b/pkg/initializer/common/enroller/mocks/cryptoenroller.go new file mode 100644 index 00000000..7d30e850 --- /dev/null +++ b/pkg/initializer/common/enroller/mocks/cryptoenroller.go @@ -0,0 +1,249 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
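+//
+// Illustrative usage sketch (assumed test wiring, not part of the generated
+// fake; names such as "enroller" are hypothetical):
+//
+//	enroller := &mocks.CryptoEnroller{}
+//	enroller.EnrollReturns(&config.Response{}, nil) // canned crypto material
+//	enroller.PingCAReturns(nil)                     // pretend the CA is reachable
+//	resp, err := enroller.Enroll()                  // returns the canned response
+//	calls := enroller.EnrollCallCount()             // -> 1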
+package mocks + +import ( + "sync" + "time" + + "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/enroller" +) + +type CryptoEnroller struct { + EnrollStub func() (*config.Response, error) + enrollMutex sync.RWMutex + enrollArgsForCall []struct { + } + enrollReturns struct { + result1 *config.Response + result2 error + } + enrollReturnsOnCall map[int]struct { + result1 *config.Response + result2 error + } + GetEnrollmentRequestStub func() *v1beta1.Enrollment + getEnrollmentRequestMutex sync.RWMutex + getEnrollmentRequestArgsForCall []struct { + } + getEnrollmentRequestReturns struct { + result1 *v1beta1.Enrollment + } + getEnrollmentRequestReturnsOnCall map[int]struct { + result1 *v1beta1.Enrollment + } + PingCAStub func(time.Duration) error + pingCAMutex sync.RWMutex + pingCAArgsForCall []struct { + arg1 time.Duration + } + pingCAReturns struct { + result1 error + } + pingCAReturnsOnCall map[int]struct { + result1 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *CryptoEnroller) Enroll() (*config.Response, error) { + fake.enrollMutex.Lock() + ret, specificReturn := fake.enrollReturnsOnCall[len(fake.enrollArgsForCall)] + fake.enrollArgsForCall = append(fake.enrollArgsForCall, struct { + }{}) + stub := fake.EnrollStub + fakeReturns := fake.enrollReturns + fake.recordInvocation("Enroll", []interface{}{}) + fake.enrollMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *CryptoEnroller) EnrollCallCount() int { + fake.enrollMutex.RLock() + defer fake.enrollMutex.RUnlock() + return len(fake.enrollArgsForCall) +} + +func (fake *CryptoEnroller) EnrollCalls(stub func() (*config.Response, error)) { + fake.enrollMutex.Lock() + defer fake.enrollMutex.Unlock() + fake.EnrollStub = stub +} + +func (fake *CryptoEnroller) EnrollReturns(result1 *config.Response, result2 error) { + fake.enrollMutex.Lock() + defer fake.enrollMutex.Unlock() + fake.EnrollStub = nil + fake.enrollReturns = struct { + result1 *config.Response + result2 error + }{result1, result2} +} + +func (fake *CryptoEnroller) EnrollReturnsOnCall(i int, result1 *config.Response, result2 error) { + fake.enrollMutex.Lock() + defer fake.enrollMutex.Unlock() + fake.EnrollStub = nil + if fake.enrollReturnsOnCall == nil { + fake.enrollReturnsOnCall = make(map[int]struct { + result1 *config.Response + result2 error + }) + } + fake.enrollReturnsOnCall[i] = struct { + result1 *config.Response + result2 error + }{result1, result2} +} + +func (fake *CryptoEnroller) GetEnrollmentRequest() *v1beta1.Enrollment { + fake.getEnrollmentRequestMutex.Lock() + ret, specificReturn := fake.getEnrollmentRequestReturnsOnCall[len(fake.getEnrollmentRequestArgsForCall)] + fake.getEnrollmentRequestArgsForCall = append(fake.getEnrollmentRequestArgsForCall, struct { + }{}) + stub := fake.GetEnrollmentRequestStub + fakeReturns := fake.getEnrollmentRequestReturns + fake.recordInvocation("GetEnrollmentRequest", []interface{}{}) + fake.getEnrollmentRequestMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *CryptoEnroller) GetEnrollmentRequestCallCount() int { + fake.getEnrollmentRequestMutex.RLock() + defer fake.getEnrollmentRequestMutex.RUnlock() + return 
len(fake.getEnrollmentRequestArgsForCall) +} + +func (fake *CryptoEnroller) GetEnrollmentRequestCalls(stub func() *v1beta1.Enrollment) { + fake.getEnrollmentRequestMutex.Lock() + defer fake.getEnrollmentRequestMutex.Unlock() + fake.GetEnrollmentRequestStub = stub +} + +func (fake *CryptoEnroller) GetEnrollmentRequestReturns(result1 *v1beta1.Enrollment) { + fake.getEnrollmentRequestMutex.Lock() + defer fake.getEnrollmentRequestMutex.Unlock() + fake.GetEnrollmentRequestStub = nil + fake.getEnrollmentRequestReturns = struct { + result1 *v1beta1.Enrollment + }{result1} +} + +func (fake *CryptoEnroller) GetEnrollmentRequestReturnsOnCall(i int, result1 *v1beta1.Enrollment) { + fake.getEnrollmentRequestMutex.Lock() + defer fake.getEnrollmentRequestMutex.Unlock() + fake.GetEnrollmentRequestStub = nil + if fake.getEnrollmentRequestReturnsOnCall == nil { + fake.getEnrollmentRequestReturnsOnCall = make(map[int]struct { + result1 *v1beta1.Enrollment + }) + } + fake.getEnrollmentRequestReturnsOnCall[i] = struct { + result1 *v1beta1.Enrollment + }{result1} +} + +func (fake *CryptoEnroller) PingCA(arg1 time.Duration) error { + fake.pingCAMutex.Lock() + ret, specificReturn := fake.pingCAReturnsOnCall[len(fake.pingCAArgsForCall)] + fake.pingCAArgsForCall = append(fake.pingCAArgsForCall, struct { + arg1 time.Duration + }{arg1}) + stub := fake.PingCAStub + fakeReturns := fake.pingCAReturns + fake.recordInvocation("PingCA", []interface{}{arg1}) + fake.pingCAMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *CryptoEnroller) PingCACallCount() int { + fake.pingCAMutex.RLock() + defer fake.pingCAMutex.RUnlock() + return len(fake.pingCAArgsForCall) +} + +func (fake *CryptoEnroller) PingCACalls(stub func(time.Duration) error) { + fake.pingCAMutex.Lock() + defer fake.pingCAMutex.Unlock() + fake.PingCAStub = stub +} + +func (fake *CryptoEnroller) PingCAArgsForCall(i int) time.Duration { + fake.pingCAMutex.RLock() + defer fake.pingCAMutex.RUnlock() + argsForCall := fake.pingCAArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *CryptoEnroller) PingCAReturns(result1 error) { + fake.pingCAMutex.Lock() + defer fake.pingCAMutex.Unlock() + fake.PingCAStub = nil + fake.pingCAReturns = struct { + result1 error + }{result1} +} + +func (fake *CryptoEnroller) PingCAReturnsOnCall(i int, result1 error) { + fake.pingCAMutex.Lock() + defer fake.pingCAMutex.Unlock() + fake.PingCAStub = nil + if fake.pingCAReturnsOnCall == nil { + fake.pingCAReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.pingCAReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *CryptoEnroller) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.enrollMutex.RLock() + defer fake.enrollMutex.RUnlock() + fake.getEnrollmentRequestMutex.RLock() + defer fake.getEnrollmentRequestMutex.RUnlock() + fake.pingCAMutex.RLock() + defer fake.pingCAMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *CryptoEnroller) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = 
append(fake.invocations[key], args) +} + +var _ enroller.CryptoEnroller = new(CryptoEnroller) diff --git a/pkg/initializer/common/enroller/mocks/cryptoinstance.go b/pkg/initializer/common/enroller/mocks/cryptoinstance.go new file mode 100644 index 00000000..adb75a79 --- /dev/null +++ b/pkg/initializer/common/enroller/mocks/cryptoinstance.go @@ -0,0 +1,2321 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package mocks + +import ( + "sync" + + "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/enroller" + v1a "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" +) + +type CryptoInstance struct { + DeepCopyObjectStub func() runtime.Object + deepCopyObjectMutex sync.RWMutex + deepCopyObjectArgsForCall []struct { + } + deepCopyObjectReturns struct { + result1 runtime.Object + } + deepCopyObjectReturnsOnCall map[int]struct { + result1 runtime.Object + } + EnrollerImageStub func() string + enrollerImageMutex sync.RWMutex + enrollerImageArgsForCall []struct { + } + enrollerImageReturns struct { + result1 string + } + enrollerImageReturnsOnCall map[int]struct { + result1 string + } + GetAnnotationsStub func() map[string]string + getAnnotationsMutex sync.RWMutex + getAnnotationsArgsForCall []struct { + } + getAnnotationsReturns struct { + result1 map[string]string + } + getAnnotationsReturnsOnCall map[int]struct { + result1 map[string]string + } + GetClusterNameStub func() string + getClusterNameMutex sync.RWMutex + getClusterNameArgsForCall []struct { + } + getClusterNameReturns struct { + result1 string + } + getClusterNameReturnsOnCall map[int]struct { + result1 string + } + GetConfigOverrideStub func() (interface{}, error) + getConfigOverrideMutex sync.RWMutex + getConfigOverrideArgsForCall []struct { + } + getConfigOverrideReturns struct { + result1 interface{} + result2 error + } + getConfigOverrideReturnsOnCall map[int]struct { + result1 interface{} + result2 error + } + GetCreationTimestampStub func() v1.Time + getCreationTimestampMutex sync.RWMutex + getCreationTimestampArgsForCall []struct { + } + getCreationTimestampReturns struct { + result1 v1.Time + } + getCreationTimestampReturnsOnCall map[int]struct { + result1 v1.Time + } + GetDeletionGracePeriodSecondsStub func() *int64 + getDeletionGracePeriodSecondsMutex sync.RWMutex + getDeletionGracePeriodSecondsArgsForCall []struct { + } + getDeletionGracePeriodSecondsReturns struct { + result1 *int64 + } + getDeletionGracePeriodSecondsReturnsOnCall map[int]struct { + result1 *int64 + } + GetDeletionTimestampStub func() *v1.Time + getDeletionTimestampMutex sync.RWMutex + getDeletionTimestampArgsForCall []struct { + } + getDeletionTimestampReturns struct { + result1 *v1.Time + } + getDeletionTimestampReturnsOnCall map[int]struct { + result1 *v1.Time + } + GetFinalizersStub func() []string + getFinalizersMutex sync.RWMutex + getFinalizersArgsForCall []struct { + } + getFinalizersReturns struct { + result1 []string + } + getFinalizersReturnsOnCall map[int]struct { + result1 []string + } + GetGenerateNameStub func() string + getGenerateNameMutex sync.RWMutex + getGenerateNameArgsForCall []struct { + } + getGenerateNameReturns struct { + result1 string + } + getGenerateNameReturnsOnCall map[int]struct { + result1 string + } + GetGenerationStub func() int64 + getGenerationMutex sync.RWMutex + getGenerationArgsForCall []struct { + } + 
getGenerationReturns struct { + result1 int64 + } + getGenerationReturnsOnCall map[int]struct { + result1 int64 + } + GetLabelsStub func() map[string]string + getLabelsMutex sync.RWMutex + getLabelsArgsForCall []struct { + } + getLabelsReturns struct { + result1 map[string]string + } + getLabelsReturnsOnCall map[int]struct { + result1 map[string]string + } + GetManagedFieldsStub func() []v1.ManagedFieldsEntry + getManagedFieldsMutex sync.RWMutex + getManagedFieldsArgsForCall []struct { + } + getManagedFieldsReturns struct { + result1 []v1.ManagedFieldsEntry + } + getManagedFieldsReturnsOnCall map[int]struct { + result1 []v1.ManagedFieldsEntry + } + GetNameStub func() string + getNameMutex sync.RWMutex + getNameArgsForCall []struct { + } + getNameReturns struct { + result1 string + } + getNameReturnsOnCall map[int]struct { + result1 string + } + GetNamespaceStub func() string + getNamespaceMutex sync.RWMutex + getNamespaceArgsForCall []struct { + } + getNamespaceReturns struct { + result1 string + } + getNamespaceReturnsOnCall map[int]struct { + result1 string + } + GetObjectKindStub func() schema.ObjectKind + getObjectKindMutex sync.RWMutex + getObjectKindArgsForCall []struct { + } + getObjectKindReturns struct { + result1 schema.ObjectKind + } + getObjectKindReturnsOnCall map[int]struct { + result1 schema.ObjectKind + } + GetOwnerReferencesStub func() []v1.OwnerReference + getOwnerReferencesMutex sync.RWMutex + getOwnerReferencesArgsForCall []struct { + } + getOwnerReferencesReturns struct { + result1 []v1.OwnerReference + } + getOwnerReferencesReturnsOnCall map[int]struct { + result1 []v1.OwnerReference + } + GetPullSecretsStub func() []v1a.LocalObjectReference + getPullSecretsMutex sync.RWMutex + getPullSecretsArgsForCall []struct { + } + getPullSecretsReturns struct { + result1 []v1a.LocalObjectReference + } + getPullSecretsReturnsOnCall map[int]struct { + result1 []v1a.LocalObjectReference + } + GetResourceStub func(v1beta1.Component) v1a.ResourceRequirements + getResourceMutex sync.RWMutex + getResourceArgsForCall []struct { + arg1 v1beta1.Component + } + getResourceReturns struct { + result1 v1a.ResourceRequirements + } + getResourceReturnsOnCall map[int]struct { + result1 v1a.ResourceRequirements + } + GetResourceVersionStub func() string + getResourceVersionMutex sync.RWMutex + getResourceVersionArgsForCall []struct { + } + getResourceVersionReturns struct { + result1 string + } + getResourceVersionReturnsOnCall map[int]struct { + result1 string + } + GetSelfLinkStub func() string + getSelfLinkMutex sync.RWMutex + getSelfLinkArgsForCall []struct { + } + getSelfLinkReturns struct { + result1 string + } + getSelfLinkReturnsOnCall map[int]struct { + result1 string + } + GetUIDStub func() types.UID + getUIDMutex sync.RWMutex + getUIDArgsForCall []struct { + } + getUIDReturns struct { + result1 types.UID + } + getUIDReturnsOnCall map[int]struct { + result1 types.UID + } + IsHSMEnabledStub func() bool + isHSMEnabledMutex sync.RWMutex + isHSMEnabledArgsForCall []struct { + } + isHSMEnabledReturns struct { + result1 bool + } + isHSMEnabledReturnsOnCall map[int]struct { + result1 bool + } + PVCNameStub func() string + pVCNameMutex sync.RWMutex + pVCNameArgsForCall []struct { + } + pVCNameReturns struct { + result1 string + } + pVCNameReturnsOnCall map[int]struct { + result1 string + } + SetAnnotationsStub func(map[string]string) + setAnnotationsMutex sync.RWMutex + setAnnotationsArgsForCall []struct { + arg1 map[string]string + } + SetClusterNameStub func(string) + setClusterNameMutex 
sync.RWMutex + setClusterNameArgsForCall []struct { + arg1 string + } + SetCreationTimestampStub func(v1.Time) + setCreationTimestampMutex sync.RWMutex + setCreationTimestampArgsForCall []struct { + arg1 v1.Time + } + SetDeletionGracePeriodSecondsStub func(*int64) + setDeletionGracePeriodSecondsMutex sync.RWMutex + setDeletionGracePeriodSecondsArgsForCall []struct { + arg1 *int64 + } + SetDeletionTimestampStub func(*v1.Time) + setDeletionTimestampMutex sync.RWMutex + setDeletionTimestampArgsForCall []struct { + arg1 *v1.Time + } + SetFinalizersStub func([]string) + setFinalizersMutex sync.RWMutex + setFinalizersArgsForCall []struct { + arg1 []string + } + SetGenerateNameStub func(string) + setGenerateNameMutex sync.RWMutex + setGenerateNameArgsForCall []struct { + arg1 string + } + SetGenerationStub func(int64) + setGenerationMutex sync.RWMutex + setGenerationArgsForCall []struct { + arg1 int64 + } + SetLabelsStub func(map[string]string) + setLabelsMutex sync.RWMutex + setLabelsArgsForCall []struct { + arg1 map[string]string + } + SetManagedFieldsStub func([]v1.ManagedFieldsEntry) + setManagedFieldsMutex sync.RWMutex + setManagedFieldsArgsForCall []struct { + arg1 []v1.ManagedFieldsEntry + } + SetNameStub func(string) + setNameMutex sync.RWMutex + setNameArgsForCall []struct { + arg1 string + } + SetNamespaceStub func(string) + setNamespaceMutex sync.RWMutex + setNamespaceArgsForCall []struct { + arg1 string + } + SetOwnerReferencesStub func([]v1.OwnerReference) + setOwnerReferencesMutex sync.RWMutex + setOwnerReferencesArgsForCall []struct { + arg1 []v1.OwnerReference + } + SetResourceVersionStub func(string) + setResourceVersionMutex sync.RWMutex + setResourceVersionArgsForCall []struct { + arg1 string + } + SetSelfLinkStub func(string) + setSelfLinkMutex sync.RWMutex + setSelfLinkArgsForCall []struct { + arg1 string + } + SetUIDStub func(types.UID) + setUIDMutex sync.RWMutex + setUIDArgsForCall []struct { + arg1 types.UID + } + UsingHSMProxyStub func() bool + usingHSMProxyMutex sync.RWMutex + usingHSMProxyArgsForCall []struct { + } + usingHSMProxyReturns struct { + result1 bool + } + usingHSMProxyReturnsOnCall map[int]struct { + result1 bool + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *CryptoInstance) DeepCopyObject() runtime.Object { + fake.deepCopyObjectMutex.Lock() + ret, specificReturn := fake.deepCopyObjectReturnsOnCall[len(fake.deepCopyObjectArgsForCall)] + fake.deepCopyObjectArgsForCall = append(fake.deepCopyObjectArgsForCall, struct { + }{}) + stub := fake.DeepCopyObjectStub + fakeReturns := fake.deepCopyObjectReturns + fake.recordInvocation("DeepCopyObject", []interface{}{}) + fake.deepCopyObjectMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *CryptoInstance) DeepCopyObjectCallCount() int { + fake.deepCopyObjectMutex.RLock() + defer fake.deepCopyObjectMutex.RUnlock() + return len(fake.deepCopyObjectArgsForCall) +} + +func (fake *CryptoInstance) DeepCopyObjectCalls(stub func() runtime.Object) { + fake.deepCopyObjectMutex.Lock() + defer fake.deepCopyObjectMutex.Unlock() + fake.DeepCopyObjectStub = stub +} + +func (fake *CryptoInstance) DeepCopyObjectReturns(result1 runtime.Object) { + fake.deepCopyObjectMutex.Lock() + defer fake.deepCopyObjectMutex.Unlock() + fake.DeepCopyObjectStub = nil + fake.deepCopyObjectReturns = struct { + result1 runtime.Object + }{result1} +} + +func (fake *CryptoInstance) DeepCopyObjectReturnsOnCall(i int, 
result1 runtime.Object) { + fake.deepCopyObjectMutex.Lock() + defer fake.deepCopyObjectMutex.Unlock() + fake.DeepCopyObjectStub = nil + if fake.deepCopyObjectReturnsOnCall == nil { + fake.deepCopyObjectReturnsOnCall = make(map[int]struct { + result1 runtime.Object + }) + } + fake.deepCopyObjectReturnsOnCall[i] = struct { + result1 runtime.Object + }{result1} +} + +func (fake *CryptoInstance) EnrollerImage() string { + fake.enrollerImageMutex.Lock() + ret, specificReturn := fake.enrollerImageReturnsOnCall[len(fake.enrollerImageArgsForCall)] + fake.enrollerImageArgsForCall = append(fake.enrollerImageArgsForCall, struct { + }{}) + stub := fake.EnrollerImageStub + fakeReturns := fake.enrollerImageReturns + fake.recordInvocation("EnrollerImage", []interface{}{}) + fake.enrollerImageMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *CryptoInstance) EnrollerImageCallCount() int { + fake.enrollerImageMutex.RLock() + defer fake.enrollerImageMutex.RUnlock() + return len(fake.enrollerImageArgsForCall) +} + +func (fake *CryptoInstance) EnrollerImageCalls(stub func() string) { + fake.enrollerImageMutex.Lock() + defer fake.enrollerImageMutex.Unlock() + fake.EnrollerImageStub = stub +} + +func (fake *CryptoInstance) EnrollerImageReturns(result1 string) { + fake.enrollerImageMutex.Lock() + defer fake.enrollerImageMutex.Unlock() + fake.EnrollerImageStub = nil + fake.enrollerImageReturns = struct { + result1 string + }{result1} +} + +func (fake *CryptoInstance) EnrollerImageReturnsOnCall(i int, result1 string) { + fake.enrollerImageMutex.Lock() + defer fake.enrollerImageMutex.Unlock() + fake.EnrollerImageStub = nil + if fake.enrollerImageReturnsOnCall == nil { + fake.enrollerImageReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.enrollerImageReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *CryptoInstance) GetAnnotations() map[string]string { + fake.getAnnotationsMutex.Lock() + ret, specificReturn := fake.getAnnotationsReturnsOnCall[len(fake.getAnnotationsArgsForCall)] + fake.getAnnotationsArgsForCall = append(fake.getAnnotationsArgsForCall, struct { + }{}) + stub := fake.GetAnnotationsStub + fakeReturns := fake.getAnnotationsReturns + fake.recordInvocation("GetAnnotations", []interface{}{}) + fake.getAnnotationsMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *CryptoInstance) GetAnnotationsCallCount() int { + fake.getAnnotationsMutex.RLock() + defer fake.getAnnotationsMutex.RUnlock() + return len(fake.getAnnotationsArgsForCall) +} + +func (fake *CryptoInstance) GetAnnotationsCalls(stub func() map[string]string) { + fake.getAnnotationsMutex.Lock() + defer fake.getAnnotationsMutex.Unlock() + fake.GetAnnotationsStub = stub +} + +func (fake *CryptoInstance) GetAnnotationsReturns(result1 map[string]string) { + fake.getAnnotationsMutex.Lock() + defer fake.getAnnotationsMutex.Unlock() + fake.GetAnnotationsStub = nil + fake.getAnnotationsReturns = struct { + result1 map[string]string + }{result1} +} + +func (fake *CryptoInstance) GetAnnotationsReturnsOnCall(i int, result1 map[string]string) { + fake.getAnnotationsMutex.Lock() + defer fake.getAnnotationsMutex.Unlock() + fake.GetAnnotationsStub = nil + if fake.getAnnotationsReturnsOnCall == nil { + fake.getAnnotationsReturnsOnCall = make(map[int]struct { + result1 map[string]string + }) + } + fake.getAnnotationsReturnsOnCall[i] 
= struct { + result1 map[string]string + }{result1} +} + +func (fake *CryptoInstance) GetClusterName() string { + fake.getClusterNameMutex.Lock() + ret, specificReturn := fake.getClusterNameReturnsOnCall[len(fake.getClusterNameArgsForCall)] + fake.getClusterNameArgsForCall = append(fake.getClusterNameArgsForCall, struct { + }{}) + stub := fake.GetClusterNameStub + fakeReturns := fake.getClusterNameReturns + fake.recordInvocation("GetClusterName", []interface{}{}) + fake.getClusterNameMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *CryptoInstance) GetClusterNameCallCount() int { + fake.getClusterNameMutex.RLock() + defer fake.getClusterNameMutex.RUnlock() + return len(fake.getClusterNameArgsForCall) +} + +func (fake *CryptoInstance) GetClusterNameCalls(stub func() string) { + fake.getClusterNameMutex.Lock() + defer fake.getClusterNameMutex.Unlock() + fake.GetClusterNameStub = stub +} + +func (fake *CryptoInstance) GetClusterNameReturns(result1 string) { + fake.getClusterNameMutex.Lock() + defer fake.getClusterNameMutex.Unlock() + fake.GetClusterNameStub = nil + fake.getClusterNameReturns = struct { + result1 string + }{result1} +} + +func (fake *CryptoInstance) GetClusterNameReturnsOnCall(i int, result1 string) { + fake.getClusterNameMutex.Lock() + defer fake.getClusterNameMutex.Unlock() + fake.GetClusterNameStub = nil + if fake.getClusterNameReturnsOnCall == nil { + fake.getClusterNameReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getClusterNameReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *CryptoInstance) GetConfigOverride() (interface{}, error) { + fake.getConfigOverrideMutex.Lock() + ret, specificReturn := fake.getConfigOverrideReturnsOnCall[len(fake.getConfigOverrideArgsForCall)] + fake.getConfigOverrideArgsForCall = append(fake.getConfigOverrideArgsForCall, struct { + }{}) + stub := fake.GetConfigOverrideStub + fakeReturns := fake.getConfigOverrideReturns + fake.recordInvocation("GetConfigOverride", []interface{}{}) + fake.getConfigOverrideMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *CryptoInstance) GetConfigOverrideCallCount() int { + fake.getConfigOverrideMutex.RLock() + defer fake.getConfigOverrideMutex.RUnlock() + return len(fake.getConfigOverrideArgsForCall) +} + +func (fake *CryptoInstance) GetConfigOverrideCalls(stub func() (interface{}, error)) { + fake.getConfigOverrideMutex.Lock() + defer fake.getConfigOverrideMutex.Unlock() + fake.GetConfigOverrideStub = stub +} + +func (fake *CryptoInstance) GetConfigOverrideReturns(result1 interface{}, result2 error) { + fake.getConfigOverrideMutex.Lock() + defer fake.getConfigOverrideMutex.Unlock() + fake.GetConfigOverrideStub = nil + fake.getConfigOverrideReturns = struct { + result1 interface{} + result2 error + }{result1, result2} +} + +func (fake *CryptoInstance) GetConfigOverrideReturnsOnCall(i int, result1 interface{}, result2 error) { + fake.getConfigOverrideMutex.Lock() + defer fake.getConfigOverrideMutex.Unlock() + fake.GetConfigOverrideStub = nil + if fake.getConfigOverrideReturnsOnCall == nil { + fake.getConfigOverrideReturnsOnCall = make(map[int]struct { + result1 interface{} + result2 error + }) + } + fake.getConfigOverrideReturnsOnCall[i] = struct { + result1 interface{} + result2 error + }{result1, result2} +} + +func (fake 
*CryptoInstance) GetCreationTimestamp() v1.Time { + fake.getCreationTimestampMutex.Lock() + ret, specificReturn := fake.getCreationTimestampReturnsOnCall[len(fake.getCreationTimestampArgsForCall)] + fake.getCreationTimestampArgsForCall = append(fake.getCreationTimestampArgsForCall, struct { + }{}) + stub := fake.GetCreationTimestampStub + fakeReturns := fake.getCreationTimestampReturns + fake.recordInvocation("GetCreationTimestamp", []interface{}{}) + fake.getCreationTimestampMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *CryptoInstance) GetCreationTimestampCallCount() int { + fake.getCreationTimestampMutex.RLock() + defer fake.getCreationTimestampMutex.RUnlock() + return len(fake.getCreationTimestampArgsForCall) +} + +func (fake *CryptoInstance) GetCreationTimestampCalls(stub func() v1.Time) { + fake.getCreationTimestampMutex.Lock() + defer fake.getCreationTimestampMutex.Unlock() + fake.GetCreationTimestampStub = stub +} + +func (fake *CryptoInstance) GetCreationTimestampReturns(result1 v1.Time) { + fake.getCreationTimestampMutex.Lock() + defer fake.getCreationTimestampMutex.Unlock() + fake.GetCreationTimestampStub = nil + fake.getCreationTimestampReturns = struct { + result1 v1.Time + }{result1} +} + +func (fake *CryptoInstance) GetCreationTimestampReturnsOnCall(i int, result1 v1.Time) { + fake.getCreationTimestampMutex.Lock() + defer fake.getCreationTimestampMutex.Unlock() + fake.GetCreationTimestampStub = nil + if fake.getCreationTimestampReturnsOnCall == nil { + fake.getCreationTimestampReturnsOnCall = make(map[int]struct { + result1 v1.Time + }) + } + fake.getCreationTimestampReturnsOnCall[i] = struct { + result1 v1.Time + }{result1} +} + +func (fake *CryptoInstance) GetDeletionGracePeriodSeconds() *int64 { + fake.getDeletionGracePeriodSecondsMutex.Lock() + ret, specificReturn := fake.getDeletionGracePeriodSecondsReturnsOnCall[len(fake.getDeletionGracePeriodSecondsArgsForCall)] + fake.getDeletionGracePeriodSecondsArgsForCall = append(fake.getDeletionGracePeriodSecondsArgsForCall, struct { + }{}) + stub := fake.GetDeletionGracePeriodSecondsStub + fakeReturns := fake.getDeletionGracePeriodSecondsReturns + fake.recordInvocation("GetDeletionGracePeriodSeconds", []interface{}{}) + fake.getDeletionGracePeriodSecondsMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *CryptoInstance) GetDeletionGracePeriodSecondsCallCount() int { + fake.getDeletionGracePeriodSecondsMutex.RLock() + defer fake.getDeletionGracePeriodSecondsMutex.RUnlock() + return len(fake.getDeletionGracePeriodSecondsArgsForCall) +} + +func (fake *CryptoInstance) GetDeletionGracePeriodSecondsCalls(stub func() *int64) { + fake.getDeletionGracePeriodSecondsMutex.Lock() + defer fake.getDeletionGracePeriodSecondsMutex.Unlock() + fake.GetDeletionGracePeriodSecondsStub = stub +} + +func (fake *CryptoInstance) GetDeletionGracePeriodSecondsReturns(result1 *int64) { + fake.getDeletionGracePeriodSecondsMutex.Lock() + defer fake.getDeletionGracePeriodSecondsMutex.Unlock() + fake.GetDeletionGracePeriodSecondsStub = nil + fake.getDeletionGracePeriodSecondsReturns = struct { + result1 *int64 + }{result1} +} + +func (fake *CryptoInstance) GetDeletionGracePeriodSecondsReturnsOnCall(i int, result1 *int64) { + fake.getDeletionGracePeriodSecondsMutex.Lock() + defer fake.getDeletionGracePeriodSecondsMutex.Unlock() + 
fake.GetDeletionGracePeriodSecondsStub = nil + if fake.getDeletionGracePeriodSecondsReturnsOnCall == nil { + fake.getDeletionGracePeriodSecondsReturnsOnCall = make(map[int]struct { + result1 *int64 + }) + } + fake.getDeletionGracePeriodSecondsReturnsOnCall[i] = struct { + result1 *int64 + }{result1} +} + +func (fake *CryptoInstance) GetDeletionTimestamp() *v1.Time { + fake.getDeletionTimestampMutex.Lock() + ret, specificReturn := fake.getDeletionTimestampReturnsOnCall[len(fake.getDeletionTimestampArgsForCall)] + fake.getDeletionTimestampArgsForCall = append(fake.getDeletionTimestampArgsForCall, struct { + }{}) + stub := fake.GetDeletionTimestampStub + fakeReturns := fake.getDeletionTimestampReturns + fake.recordInvocation("GetDeletionTimestamp", []interface{}{}) + fake.getDeletionTimestampMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *CryptoInstance) GetDeletionTimestampCallCount() int { + fake.getDeletionTimestampMutex.RLock() + defer fake.getDeletionTimestampMutex.RUnlock() + return len(fake.getDeletionTimestampArgsForCall) +} + +func (fake *CryptoInstance) GetDeletionTimestampCalls(stub func() *v1.Time) { + fake.getDeletionTimestampMutex.Lock() + defer fake.getDeletionTimestampMutex.Unlock() + fake.GetDeletionTimestampStub = stub +} + +func (fake *CryptoInstance) GetDeletionTimestampReturns(result1 *v1.Time) { + fake.getDeletionTimestampMutex.Lock() + defer fake.getDeletionTimestampMutex.Unlock() + fake.GetDeletionTimestampStub = nil + fake.getDeletionTimestampReturns = struct { + result1 *v1.Time + }{result1} +} + +func (fake *CryptoInstance) GetDeletionTimestampReturnsOnCall(i int, result1 *v1.Time) { + fake.getDeletionTimestampMutex.Lock() + defer fake.getDeletionTimestampMutex.Unlock() + fake.GetDeletionTimestampStub = nil + if fake.getDeletionTimestampReturnsOnCall == nil { + fake.getDeletionTimestampReturnsOnCall = make(map[int]struct { + result1 *v1.Time + }) + } + fake.getDeletionTimestampReturnsOnCall[i] = struct { + result1 *v1.Time + }{result1} +} + +func (fake *CryptoInstance) GetFinalizers() []string { + fake.getFinalizersMutex.Lock() + ret, specificReturn := fake.getFinalizersReturnsOnCall[len(fake.getFinalizersArgsForCall)] + fake.getFinalizersArgsForCall = append(fake.getFinalizersArgsForCall, struct { + }{}) + stub := fake.GetFinalizersStub + fakeReturns := fake.getFinalizersReturns + fake.recordInvocation("GetFinalizers", []interface{}{}) + fake.getFinalizersMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *CryptoInstance) GetFinalizersCallCount() int { + fake.getFinalizersMutex.RLock() + defer fake.getFinalizersMutex.RUnlock() + return len(fake.getFinalizersArgsForCall) +} + +func (fake *CryptoInstance) GetFinalizersCalls(stub func() []string) { + fake.getFinalizersMutex.Lock() + defer fake.getFinalizersMutex.Unlock() + fake.GetFinalizersStub = stub +} + +func (fake *CryptoInstance) GetFinalizersReturns(result1 []string) { + fake.getFinalizersMutex.Lock() + defer fake.getFinalizersMutex.Unlock() + fake.GetFinalizersStub = nil + fake.getFinalizersReturns = struct { + result1 []string + }{result1} +} + +func (fake *CryptoInstance) GetFinalizersReturnsOnCall(i int, result1 []string) { + fake.getFinalizersMutex.Lock() + defer fake.getFinalizersMutex.Unlock() + fake.GetFinalizersStub = nil + if fake.getFinalizersReturnsOnCall == nil { + fake.getFinalizersReturnsOnCall = 
make(map[int]struct { + result1 []string + }) + } + fake.getFinalizersReturnsOnCall[i] = struct { + result1 []string + }{result1} +} + +func (fake *CryptoInstance) GetGenerateName() string { + fake.getGenerateNameMutex.Lock() + ret, specificReturn := fake.getGenerateNameReturnsOnCall[len(fake.getGenerateNameArgsForCall)] + fake.getGenerateNameArgsForCall = append(fake.getGenerateNameArgsForCall, struct { + }{}) + stub := fake.GetGenerateNameStub + fakeReturns := fake.getGenerateNameReturns + fake.recordInvocation("GetGenerateName", []interface{}{}) + fake.getGenerateNameMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *CryptoInstance) GetGenerateNameCallCount() int { + fake.getGenerateNameMutex.RLock() + defer fake.getGenerateNameMutex.RUnlock() + return len(fake.getGenerateNameArgsForCall) +} + +func (fake *CryptoInstance) GetGenerateNameCalls(stub func() string) { + fake.getGenerateNameMutex.Lock() + defer fake.getGenerateNameMutex.Unlock() + fake.GetGenerateNameStub = stub +} + +func (fake *CryptoInstance) GetGenerateNameReturns(result1 string) { + fake.getGenerateNameMutex.Lock() + defer fake.getGenerateNameMutex.Unlock() + fake.GetGenerateNameStub = nil + fake.getGenerateNameReturns = struct { + result1 string + }{result1} +} + +func (fake *CryptoInstance) GetGenerateNameReturnsOnCall(i int, result1 string) { + fake.getGenerateNameMutex.Lock() + defer fake.getGenerateNameMutex.Unlock() + fake.GetGenerateNameStub = nil + if fake.getGenerateNameReturnsOnCall == nil { + fake.getGenerateNameReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getGenerateNameReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *CryptoInstance) GetGeneration() int64 { + fake.getGenerationMutex.Lock() + ret, specificReturn := fake.getGenerationReturnsOnCall[len(fake.getGenerationArgsForCall)] + fake.getGenerationArgsForCall = append(fake.getGenerationArgsForCall, struct { + }{}) + stub := fake.GetGenerationStub + fakeReturns := fake.getGenerationReturns + fake.recordInvocation("GetGeneration", []interface{}{}) + fake.getGenerationMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *CryptoInstance) GetGenerationCallCount() int { + fake.getGenerationMutex.RLock() + defer fake.getGenerationMutex.RUnlock() + return len(fake.getGenerationArgsForCall) +} + +func (fake *CryptoInstance) GetGenerationCalls(stub func() int64) { + fake.getGenerationMutex.Lock() + defer fake.getGenerationMutex.Unlock() + fake.GetGenerationStub = stub +} + +func (fake *CryptoInstance) GetGenerationReturns(result1 int64) { + fake.getGenerationMutex.Lock() + defer fake.getGenerationMutex.Unlock() + fake.GetGenerationStub = nil + fake.getGenerationReturns = struct { + result1 int64 + }{result1} +} + +func (fake *CryptoInstance) GetGenerationReturnsOnCall(i int, result1 int64) { + fake.getGenerationMutex.Lock() + defer fake.getGenerationMutex.Unlock() + fake.GetGenerationStub = nil + if fake.getGenerationReturnsOnCall == nil { + fake.getGenerationReturnsOnCall = make(map[int]struct { + result1 int64 + }) + } + fake.getGenerationReturnsOnCall[i] = struct { + result1 int64 + }{result1} +} + +func (fake *CryptoInstance) GetLabels() map[string]string { + fake.getLabelsMutex.Lock() + ret, specificReturn := fake.getLabelsReturnsOnCall[len(fake.getLabelsArgsForCall)] + fake.getLabelsArgsForCall = 
append(fake.getLabelsArgsForCall, struct { + }{}) + stub := fake.GetLabelsStub + fakeReturns := fake.getLabelsReturns + fake.recordInvocation("GetLabels", []interface{}{}) + fake.getLabelsMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *CryptoInstance) GetLabelsCallCount() int { + fake.getLabelsMutex.RLock() + defer fake.getLabelsMutex.RUnlock() + return len(fake.getLabelsArgsForCall) +} + +func (fake *CryptoInstance) GetLabelsCalls(stub func() map[string]string) { + fake.getLabelsMutex.Lock() + defer fake.getLabelsMutex.Unlock() + fake.GetLabelsStub = stub +} + +func (fake *CryptoInstance) GetLabelsReturns(result1 map[string]string) { + fake.getLabelsMutex.Lock() + defer fake.getLabelsMutex.Unlock() + fake.GetLabelsStub = nil + fake.getLabelsReturns = struct { + result1 map[string]string + }{result1} +} + +func (fake *CryptoInstance) GetLabelsReturnsOnCall(i int, result1 map[string]string) { + fake.getLabelsMutex.Lock() + defer fake.getLabelsMutex.Unlock() + fake.GetLabelsStub = nil + if fake.getLabelsReturnsOnCall == nil { + fake.getLabelsReturnsOnCall = make(map[int]struct { + result1 map[string]string + }) + } + fake.getLabelsReturnsOnCall[i] = struct { + result1 map[string]string + }{result1} +} + +func (fake *CryptoInstance) GetManagedFields() []v1.ManagedFieldsEntry { + fake.getManagedFieldsMutex.Lock() + ret, specificReturn := fake.getManagedFieldsReturnsOnCall[len(fake.getManagedFieldsArgsForCall)] + fake.getManagedFieldsArgsForCall = append(fake.getManagedFieldsArgsForCall, struct { + }{}) + stub := fake.GetManagedFieldsStub + fakeReturns := fake.getManagedFieldsReturns + fake.recordInvocation("GetManagedFields", []interface{}{}) + fake.getManagedFieldsMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *CryptoInstance) GetManagedFieldsCallCount() int { + fake.getManagedFieldsMutex.RLock() + defer fake.getManagedFieldsMutex.RUnlock() + return len(fake.getManagedFieldsArgsForCall) +} + +func (fake *CryptoInstance) GetManagedFieldsCalls(stub func() []v1.ManagedFieldsEntry) { + fake.getManagedFieldsMutex.Lock() + defer fake.getManagedFieldsMutex.Unlock() + fake.GetManagedFieldsStub = stub +} + +func (fake *CryptoInstance) GetManagedFieldsReturns(result1 []v1.ManagedFieldsEntry) { + fake.getManagedFieldsMutex.Lock() + defer fake.getManagedFieldsMutex.Unlock() + fake.GetManagedFieldsStub = nil + fake.getManagedFieldsReturns = struct { + result1 []v1.ManagedFieldsEntry + }{result1} +} + +func (fake *CryptoInstance) GetManagedFieldsReturnsOnCall(i int, result1 []v1.ManagedFieldsEntry) { + fake.getManagedFieldsMutex.Lock() + defer fake.getManagedFieldsMutex.Unlock() + fake.GetManagedFieldsStub = nil + if fake.getManagedFieldsReturnsOnCall == nil { + fake.getManagedFieldsReturnsOnCall = make(map[int]struct { + result1 []v1.ManagedFieldsEntry + }) + } + fake.getManagedFieldsReturnsOnCall[i] = struct { + result1 []v1.ManagedFieldsEntry + }{result1} +} + +func (fake *CryptoInstance) GetName() string { + fake.getNameMutex.Lock() + ret, specificReturn := fake.getNameReturnsOnCall[len(fake.getNameArgsForCall)] + fake.getNameArgsForCall = append(fake.getNameArgsForCall, struct { + }{}) + stub := fake.GetNameStub + fakeReturns := fake.getNameReturns + fake.recordInvocation("GetName", []interface{}{}) + fake.getNameMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return 
ret.result1 + } + return fakeReturns.result1 +} + +func (fake *CryptoInstance) GetNameCallCount() int { + fake.getNameMutex.RLock() + defer fake.getNameMutex.RUnlock() + return len(fake.getNameArgsForCall) +} + +func (fake *CryptoInstance) GetNameCalls(stub func() string) { + fake.getNameMutex.Lock() + defer fake.getNameMutex.Unlock() + fake.GetNameStub = stub +} + +func (fake *CryptoInstance) GetNameReturns(result1 string) { + fake.getNameMutex.Lock() + defer fake.getNameMutex.Unlock() + fake.GetNameStub = nil + fake.getNameReturns = struct { + result1 string + }{result1} +} + +func (fake *CryptoInstance) GetNameReturnsOnCall(i int, result1 string) { + fake.getNameMutex.Lock() + defer fake.getNameMutex.Unlock() + fake.GetNameStub = nil + if fake.getNameReturnsOnCall == nil { + fake.getNameReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getNameReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *CryptoInstance) GetNamespace() string { + fake.getNamespaceMutex.Lock() + ret, specificReturn := fake.getNamespaceReturnsOnCall[len(fake.getNamespaceArgsForCall)] + fake.getNamespaceArgsForCall = append(fake.getNamespaceArgsForCall, struct { + }{}) + stub := fake.GetNamespaceStub + fakeReturns := fake.getNamespaceReturns + fake.recordInvocation("GetNamespace", []interface{}{}) + fake.getNamespaceMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *CryptoInstance) GetNamespaceCallCount() int { + fake.getNamespaceMutex.RLock() + defer fake.getNamespaceMutex.RUnlock() + return len(fake.getNamespaceArgsForCall) +} + +func (fake *CryptoInstance) GetNamespaceCalls(stub func() string) { + fake.getNamespaceMutex.Lock() + defer fake.getNamespaceMutex.Unlock() + fake.GetNamespaceStub = stub +} + +func (fake *CryptoInstance) GetNamespaceReturns(result1 string) { + fake.getNamespaceMutex.Lock() + defer fake.getNamespaceMutex.Unlock() + fake.GetNamespaceStub = nil + fake.getNamespaceReturns = struct { + result1 string + }{result1} +} + +func (fake *CryptoInstance) GetNamespaceReturnsOnCall(i int, result1 string) { + fake.getNamespaceMutex.Lock() + defer fake.getNamespaceMutex.Unlock() + fake.GetNamespaceStub = nil + if fake.getNamespaceReturnsOnCall == nil { + fake.getNamespaceReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getNamespaceReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *CryptoInstance) GetObjectKind() schema.ObjectKind { + fake.getObjectKindMutex.Lock() + ret, specificReturn := fake.getObjectKindReturnsOnCall[len(fake.getObjectKindArgsForCall)] + fake.getObjectKindArgsForCall = append(fake.getObjectKindArgsForCall, struct { + }{}) + stub := fake.GetObjectKindStub + fakeReturns := fake.getObjectKindReturns + fake.recordInvocation("GetObjectKind", []interface{}{}) + fake.getObjectKindMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *CryptoInstance) GetObjectKindCallCount() int { + fake.getObjectKindMutex.RLock() + defer fake.getObjectKindMutex.RUnlock() + return len(fake.getObjectKindArgsForCall) +} + +func (fake *CryptoInstance) GetObjectKindCalls(stub func() schema.ObjectKind) { + fake.getObjectKindMutex.Lock() + defer fake.getObjectKindMutex.Unlock() + fake.GetObjectKindStub = stub +} + +func (fake *CryptoInstance) GetObjectKindReturns(result1 schema.ObjectKind) { + fake.getObjectKindMutex.Lock() + defer 
fake.getObjectKindMutex.Unlock() + fake.GetObjectKindStub = nil + fake.getObjectKindReturns = struct { + result1 schema.ObjectKind + }{result1} +} + +func (fake *CryptoInstance) GetObjectKindReturnsOnCall(i int, result1 schema.ObjectKind) { + fake.getObjectKindMutex.Lock() + defer fake.getObjectKindMutex.Unlock() + fake.GetObjectKindStub = nil + if fake.getObjectKindReturnsOnCall == nil { + fake.getObjectKindReturnsOnCall = make(map[int]struct { + result1 schema.ObjectKind + }) + } + fake.getObjectKindReturnsOnCall[i] = struct { + result1 schema.ObjectKind + }{result1} +} + +func (fake *CryptoInstance) GetOwnerReferences() []v1.OwnerReference { + fake.getOwnerReferencesMutex.Lock() + ret, specificReturn := fake.getOwnerReferencesReturnsOnCall[len(fake.getOwnerReferencesArgsForCall)] + fake.getOwnerReferencesArgsForCall = append(fake.getOwnerReferencesArgsForCall, struct { + }{}) + stub := fake.GetOwnerReferencesStub + fakeReturns := fake.getOwnerReferencesReturns + fake.recordInvocation("GetOwnerReferences", []interface{}{}) + fake.getOwnerReferencesMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *CryptoInstance) GetOwnerReferencesCallCount() int { + fake.getOwnerReferencesMutex.RLock() + defer fake.getOwnerReferencesMutex.RUnlock() + return len(fake.getOwnerReferencesArgsForCall) +} + +func (fake *CryptoInstance) GetOwnerReferencesCalls(stub func() []v1.OwnerReference) { + fake.getOwnerReferencesMutex.Lock() + defer fake.getOwnerReferencesMutex.Unlock() + fake.GetOwnerReferencesStub = stub +} + +func (fake *CryptoInstance) GetOwnerReferencesReturns(result1 []v1.OwnerReference) { + fake.getOwnerReferencesMutex.Lock() + defer fake.getOwnerReferencesMutex.Unlock() + fake.GetOwnerReferencesStub = nil + fake.getOwnerReferencesReturns = struct { + result1 []v1.OwnerReference + }{result1} +} + +func (fake *CryptoInstance) GetOwnerReferencesReturnsOnCall(i int, result1 []v1.OwnerReference) { + fake.getOwnerReferencesMutex.Lock() + defer fake.getOwnerReferencesMutex.Unlock() + fake.GetOwnerReferencesStub = nil + if fake.getOwnerReferencesReturnsOnCall == nil { + fake.getOwnerReferencesReturnsOnCall = make(map[int]struct { + result1 []v1.OwnerReference + }) + } + fake.getOwnerReferencesReturnsOnCall[i] = struct { + result1 []v1.OwnerReference + }{result1} +} + +func (fake *CryptoInstance) GetPullSecrets() []v1a.LocalObjectReference { + fake.getPullSecretsMutex.Lock() + ret, specificReturn := fake.getPullSecretsReturnsOnCall[len(fake.getPullSecretsArgsForCall)] + fake.getPullSecretsArgsForCall = append(fake.getPullSecretsArgsForCall, struct { + }{}) + stub := fake.GetPullSecretsStub + fakeReturns := fake.getPullSecretsReturns + fake.recordInvocation("GetPullSecrets", []interface{}{}) + fake.getPullSecretsMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *CryptoInstance) GetPullSecretsCallCount() int { + fake.getPullSecretsMutex.RLock() + defer fake.getPullSecretsMutex.RUnlock() + return len(fake.getPullSecretsArgsForCall) +} + +func (fake *CryptoInstance) GetPullSecretsCalls(stub func() []v1a.LocalObjectReference) { + fake.getPullSecretsMutex.Lock() + defer fake.getPullSecretsMutex.Unlock() + fake.GetPullSecretsStub = stub +} + +func (fake *CryptoInstance) GetPullSecretsReturns(result1 []v1a.LocalObjectReference) { + fake.getPullSecretsMutex.Lock() + defer fake.getPullSecretsMutex.Unlock() + 
fake.GetPullSecretsStub = nil + fake.getPullSecretsReturns = struct { + result1 []v1a.LocalObjectReference + }{result1} +} + +func (fake *CryptoInstance) GetPullSecretsReturnsOnCall(i int, result1 []v1a.LocalObjectReference) { + fake.getPullSecretsMutex.Lock() + defer fake.getPullSecretsMutex.Unlock() + fake.GetPullSecretsStub = nil + if fake.getPullSecretsReturnsOnCall == nil { + fake.getPullSecretsReturnsOnCall = make(map[int]struct { + result1 []v1a.LocalObjectReference + }) + } + fake.getPullSecretsReturnsOnCall[i] = struct { + result1 []v1a.LocalObjectReference + }{result1} +} + +func (fake *CryptoInstance) GetResource(arg1 v1beta1.Component) v1a.ResourceRequirements { + fake.getResourceMutex.Lock() + ret, specificReturn := fake.getResourceReturnsOnCall[len(fake.getResourceArgsForCall)] + fake.getResourceArgsForCall = append(fake.getResourceArgsForCall, struct { + arg1 v1beta1.Component + }{arg1}) + stub := fake.GetResourceStub + fakeReturns := fake.getResourceReturns + fake.recordInvocation("GetResource", []interface{}{arg1}) + fake.getResourceMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *CryptoInstance) GetResourceCallCount() int { + fake.getResourceMutex.RLock() + defer fake.getResourceMutex.RUnlock() + return len(fake.getResourceArgsForCall) +} + +func (fake *CryptoInstance) GetResourceCalls(stub func(v1beta1.Component) v1a.ResourceRequirements) { + fake.getResourceMutex.Lock() + defer fake.getResourceMutex.Unlock() + fake.GetResourceStub = stub +} + +func (fake *CryptoInstance) GetResourceArgsForCall(i int) v1beta1.Component { + fake.getResourceMutex.RLock() + defer fake.getResourceMutex.RUnlock() + argsForCall := fake.getResourceArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *CryptoInstance) GetResourceReturns(result1 v1a.ResourceRequirements) { + fake.getResourceMutex.Lock() + defer fake.getResourceMutex.Unlock() + fake.GetResourceStub = nil + fake.getResourceReturns = struct { + result1 v1a.ResourceRequirements + }{result1} +} + +func (fake *CryptoInstance) GetResourceReturnsOnCall(i int, result1 v1a.ResourceRequirements) { + fake.getResourceMutex.Lock() + defer fake.getResourceMutex.Unlock() + fake.GetResourceStub = nil + if fake.getResourceReturnsOnCall == nil { + fake.getResourceReturnsOnCall = make(map[int]struct { + result1 v1a.ResourceRequirements + }) + } + fake.getResourceReturnsOnCall[i] = struct { + result1 v1a.ResourceRequirements + }{result1} +} + +func (fake *CryptoInstance) GetResourceVersion() string { + fake.getResourceVersionMutex.Lock() + ret, specificReturn := fake.getResourceVersionReturnsOnCall[len(fake.getResourceVersionArgsForCall)] + fake.getResourceVersionArgsForCall = append(fake.getResourceVersionArgsForCall, struct { + }{}) + stub := fake.GetResourceVersionStub + fakeReturns := fake.getResourceVersionReturns + fake.recordInvocation("GetResourceVersion", []interface{}{}) + fake.getResourceVersionMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *CryptoInstance) GetResourceVersionCallCount() int { + fake.getResourceVersionMutex.RLock() + defer fake.getResourceVersionMutex.RUnlock() + return len(fake.getResourceVersionArgsForCall) +} + +func (fake *CryptoInstance) GetResourceVersionCalls(stub func() string) { + fake.getResourceVersionMutex.Lock() + defer fake.getResourceVersionMutex.Unlock() + fake.GetResourceVersionStub = stub +} + +func 
(fake *CryptoInstance) GetResourceVersionReturns(result1 string) { + fake.getResourceVersionMutex.Lock() + defer fake.getResourceVersionMutex.Unlock() + fake.GetResourceVersionStub = nil + fake.getResourceVersionReturns = struct { + result1 string + }{result1} +} + +func (fake *CryptoInstance) GetResourceVersionReturnsOnCall(i int, result1 string) { + fake.getResourceVersionMutex.Lock() + defer fake.getResourceVersionMutex.Unlock() + fake.GetResourceVersionStub = nil + if fake.getResourceVersionReturnsOnCall == nil { + fake.getResourceVersionReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getResourceVersionReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *CryptoInstance) GetSelfLink() string { + fake.getSelfLinkMutex.Lock() + ret, specificReturn := fake.getSelfLinkReturnsOnCall[len(fake.getSelfLinkArgsForCall)] + fake.getSelfLinkArgsForCall = append(fake.getSelfLinkArgsForCall, struct { + }{}) + stub := fake.GetSelfLinkStub + fakeReturns := fake.getSelfLinkReturns + fake.recordInvocation("GetSelfLink", []interface{}{}) + fake.getSelfLinkMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *CryptoInstance) GetSelfLinkCallCount() int { + fake.getSelfLinkMutex.RLock() + defer fake.getSelfLinkMutex.RUnlock() + return len(fake.getSelfLinkArgsForCall) +} + +func (fake *CryptoInstance) GetSelfLinkCalls(stub func() string) { + fake.getSelfLinkMutex.Lock() + defer fake.getSelfLinkMutex.Unlock() + fake.GetSelfLinkStub = stub +} + +func (fake *CryptoInstance) GetSelfLinkReturns(result1 string) { + fake.getSelfLinkMutex.Lock() + defer fake.getSelfLinkMutex.Unlock() + fake.GetSelfLinkStub = nil + fake.getSelfLinkReturns = struct { + result1 string + }{result1} +} + +func (fake *CryptoInstance) GetSelfLinkReturnsOnCall(i int, result1 string) { + fake.getSelfLinkMutex.Lock() + defer fake.getSelfLinkMutex.Unlock() + fake.GetSelfLinkStub = nil + if fake.getSelfLinkReturnsOnCall == nil { + fake.getSelfLinkReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getSelfLinkReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *CryptoInstance) GetUID() types.UID { + fake.getUIDMutex.Lock() + ret, specificReturn := fake.getUIDReturnsOnCall[len(fake.getUIDArgsForCall)] + fake.getUIDArgsForCall = append(fake.getUIDArgsForCall, struct { + }{}) + stub := fake.GetUIDStub + fakeReturns := fake.getUIDReturns + fake.recordInvocation("GetUID", []interface{}{}) + fake.getUIDMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *CryptoInstance) GetUIDCallCount() int { + fake.getUIDMutex.RLock() + defer fake.getUIDMutex.RUnlock() + return len(fake.getUIDArgsForCall) +} + +func (fake *CryptoInstance) GetUIDCalls(stub func() types.UID) { + fake.getUIDMutex.Lock() + defer fake.getUIDMutex.Unlock() + fake.GetUIDStub = stub +} + +func (fake *CryptoInstance) GetUIDReturns(result1 types.UID) { + fake.getUIDMutex.Lock() + defer fake.getUIDMutex.Unlock() + fake.GetUIDStub = nil + fake.getUIDReturns = struct { + result1 types.UID + }{result1} +} + +func (fake *CryptoInstance) GetUIDReturnsOnCall(i int, result1 types.UID) { + fake.getUIDMutex.Lock() + defer fake.getUIDMutex.Unlock() + fake.GetUIDStub = nil + if fake.getUIDReturnsOnCall == nil { + fake.getUIDReturnsOnCall = make(map[int]struct { + result1 types.UID + }) + } + fake.getUIDReturnsOnCall[i] = struct { + 
result1 types.UID + }{result1} +} + +func (fake *CryptoInstance) IsHSMEnabled() bool { + fake.isHSMEnabledMutex.Lock() + ret, specificReturn := fake.isHSMEnabledReturnsOnCall[len(fake.isHSMEnabledArgsForCall)] + fake.isHSMEnabledArgsForCall = append(fake.isHSMEnabledArgsForCall, struct { + }{}) + stub := fake.IsHSMEnabledStub + fakeReturns := fake.isHSMEnabledReturns + fake.recordInvocation("IsHSMEnabled", []interface{}{}) + fake.isHSMEnabledMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *CryptoInstance) IsHSMEnabledCallCount() int { + fake.isHSMEnabledMutex.RLock() + defer fake.isHSMEnabledMutex.RUnlock() + return len(fake.isHSMEnabledArgsForCall) +} + +func (fake *CryptoInstance) IsHSMEnabledCalls(stub func() bool) { + fake.isHSMEnabledMutex.Lock() + defer fake.isHSMEnabledMutex.Unlock() + fake.IsHSMEnabledStub = stub +} + +func (fake *CryptoInstance) IsHSMEnabledReturns(result1 bool) { + fake.isHSMEnabledMutex.Lock() + defer fake.isHSMEnabledMutex.Unlock() + fake.IsHSMEnabledStub = nil + fake.isHSMEnabledReturns = struct { + result1 bool + }{result1} +} + +func (fake *CryptoInstance) IsHSMEnabledReturnsOnCall(i int, result1 bool) { + fake.isHSMEnabledMutex.Lock() + defer fake.isHSMEnabledMutex.Unlock() + fake.IsHSMEnabledStub = nil + if fake.isHSMEnabledReturnsOnCall == nil { + fake.isHSMEnabledReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.isHSMEnabledReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *CryptoInstance) PVCName() string { + fake.pVCNameMutex.Lock() + ret, specificReturn := fake.pVCNameReturnsOnCall[len(fake.pVCNameArgsForCall)] + fake.pVCNameArgsForCall = append(fake.pVCNameArgsForCall, struct { + }{}) + stub := fake.PVCNameStub + fakeReturns := fake.pVCNameReturns + fake.recordInvocation("PVCName", []interface{}{}) + fake.pVCNameMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *CryptoInstance) PVCNameCallCount() int { + fake.pVCNameMutex.RLock() + defer fake.pVCNameMutex.RUnlock() + return len(fake.pVCNameArgsForCall) +} + +func (fake *CryptoInstance) PVCNameCalls(stub func() string) { + fake.pVCNameMutex.Lock() + defer fake.pVCNameMutex.Unlock() + fake.PVCNameStub = stub +} + +func (fake *CryptoInstance) PVCNameReturns(result1 string) { + fake.pVCNameMutex.Lock() + defer fake.pVCNameMutex.Unlock() + fake.PVCNameStub = nil + fake.pVCNameReturns = struct { + result1 string + }{result1} +} + +func (fake *CryptoInstance) PVCNameReturnsOnCall(i int, result1 string) { + fake.pVCNameMutex.Lock() + defer fake.pVCNameMutex.Unlock() + fake.PVCNameStub = nil + if fake.pVCNameReturnsOnCall == nil { + fake.pVCNameReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.pVCNameReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *CryptoInstance) SetAnnotations(arg1 map[string]string) { + fake.setAnnotationsMutex.Lock() + fake.setAnnotationsArgsForCall = append(fake.setAnnotationsArgsForCall, struct { + arg1 map[string]string + }{arg1}) + stub := fake.SetAnnotationsStub + fake.recordInvocation("SetAnnotations", []interface{}{arg1}) + fake.setAnnotationsMutex.Unlock() + if stub != nil { + fake.SetAnnotationsStub(arg1) + } +} + +func (fake *CryptoInstance) SetAnnotationsCallCount() int { + fake.setAnnotationsMutex.RLock() + defer fake.setAnnotationsMutex.RUnlock() + return len(fake.setAnnotationsArgsForCall) +} + 
+func (fake *CryptoInstance) SetAnnotationsCalls(stub func(map[string]string)) { + fake.setAnnotationsMutex.Lock() + defer fake.setAnnotationsMutex.Unlock() + fake.SetAnnotationsStub = stub +} + +func (fake *CryptoInstance) SetAnnotationsArgsForCall(i int) map[string]string { + fake.setAnnotationsMutex.RLock() + defer fake.setAnnotationsMutex.RUnlock() + argsForCall := fake.setAnnotationsArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *CryptoInstance) SetClusterName(arg1 string) { + fake.setClusterNameMutex.Lock() + fake.setClusterNameArgsForCall = append(fake.setClusterNameArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetClusterNameStub + fake.recordInvocation("SetClusterName", []interface{}{arg1}) + fake.setClusterNameMutex.Unlock() + if stub != nil { + fake.SetClusterNameStub(arg1) + } +} + +func (fake *CryptoInstance) SetClusterNameCallCount() int { + fake.setClusterNameMutex.RLock() + defer fake.setClusterNameMutex.RUnlock() + return len(fake.setClusterNameArgsForCall) +} + +func (fake *CryptoInstance) SetClusterNameCalls(stub func(string)) { + fake.setClusterNameMutex.Lock() + defer fake.setClusterNameMutex.Unlock() + fake.SetClusterNameStub = stub +} + +func (fake *CryptoInstance) SetClusterNameArgsForCall(i int) string { + fake.setClusterNameMutex.RLock() + defer fake.setClusterNameMutex.RUnlock() + argsForCall := fake.setClusterNameArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *CryptoInstance) SetCreationTimestamp(arg1 v1.Time) { + fake.setCreationTimestampMutex.Lock() + fake.setCreationTimestampArgsForCall = append(fake.setCreationTimestampArgsForCall, struct { + arg1 v1.Time + }{arg1}) + stub := fake.SetCreationTimestampStub + fake.recordInvocation("SetCreationTimestamp", []interface{}{arg1}) + fake.setCreationTimestampMutex.Unlock() + if stub != nil { + fake.SetCreationTimestampStub(arg1) + } +} + +func (fake *CryptoInstance) SetCreationTimestampCallCount() int { + fake.setCreationTimestampMutex.RLock() + defer fake.setCreationTimestampMutex.RUnlock() + return len(fake.setCreationTimestampArgsForCall) +} + +func (fake *CryptoInstance) SetCreationTimestampCalls(stub func(v1.Time)) { + fake.setCreationTimestampMutex.Lock() + defer fake.setCreationTimestampMutex.Unlock() + fake.SetCreationTimestampStub = stub +} + +func (fake *CryptoInstance) SetCreationTimestampArgsForCall(i int) v1.Time { + fake.setCreationTimestampMutex.RLock() + defer fake.setCreationTimestampMutex.RUnlock() + argsForCall := fake.setCreationTimestampArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *CryptoInstance) SetDeletionGracePeriodSeconds(arg1 *int64) { + fake.setDeletionGracePeriodSecondsMutex.Lock() + fake.setDeletionGracePeriodSecondsArgsForCall = append(fake.setDeletionGracePeriodSecondsArgsForCall, struct { + arg1 *int64 + }{arg1}) + stub := fake.SetDeletionGracePeriodSecondsStub + fake.recordInvocation("SetDeletionGracePeriodSeconds", []interface{}{arg1}) + fake.setDeletionGracePeriodSecondsMutex.Unlock() + if stub != nil { + fake.SetDeletionGracePeriodSecondsStub(arg1) + } +} + +func (fake *CryptoInstance) SetDeletionGracePeriodSecondsCallCount() int { + fake.setDeletionGracePeriodSecondsMutex.RLock() + defer fake.setDeletionGracePeriodSecondsMutex.RUnlock() + return len(fake.setDeletionGracePeriodSecondsArgsForCall) +} + +func (fake *CryptoInstance) SetDeletionGracePeriodSecondsCalls(stub func(*int64)) { + fake.setDeletionGracePeriodSecondsMutex.Lock() + defer fake.setDeletionGracePeriodSecondsMutex.Unlock() + fake.SetDeletionGracePeriodSecondsStub = 
stub +} + +func (fake *CryptoInstance) SetDeletionGracePeriodSecondsArgsForCall(i int) *int64 { + fake.setDeletionGracePeriodSecondsMutex.RLock() + defer fake.setDeletionGracePeriodSecondsMutex.RUnlock() + argsForCall := fake.setDeletionGracePeriodSecondsArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *CryptoInstance) SetDeletionTimestamp(arg1 *v1.Time) { + fake.setDeletionTimestampMutex.Lock() + fake.setDeletionTimestampArgsForCall = append(fake.setDeletionTimestampArgsForCall, struct { + arg1 *v1.Time + }{arg1}) + stub := fake.SetDeletionTimestampStub + fake.recordInvocation("SetDeletionTimestamp", []interface{}{arg1}) + fake.setDeletionTimestampMutex.Unlock() + if stub != nil { + fake.SetDeletionTimestampStub(arg1) + } +} + +func (fake *CryptoInstance) SetDeletionTimestampCallCount() int { + fake.setDeletionTimestampMutex.RLock() + defer fake.setDeletionTimestampMutex.RUnlock() + return len(fake.setDeletionTimestampArgsForCall) +} + +func (fake *CryptoInstance) SetDeletionTimestampCalls(stub func(*v1.Time)) { + fake.setDeletionTimestampMutex.Lock() + defer fake.setDeletionTimestampMutex.Unlock() + fake.SetDeletionTimestampStub = stub +} + +func (fake *CryptoInstance) SetDeletionTimestampArgsForCall(i int) *v1.Time { + fake.setDeletionTimestampMutex.RLock() + defer fake.setDeletionTimestampMutex.RUnlock() + argsForCall := fake.setDeletionTimestampArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *CryptoInstance) SetFinalizers(arg1 []string) { + var arg1Copy []string + if arg1 != nil { + arg1Copy = make([]string, len(arg1)) + copy(arg1Copy, arg1) + } + fake.setFinalizersMutex.Lock() + fake.setFinalizersArgsForCall = append(fake.setFinalizersArgsForCall, struct { + arg1 []string + }{arg1Copy}) + stub := fake.SetFinalizersStub + fake.recordInvocation("SetFinalizers", []interface{}{arg1Copy}) + fake.setFinalizersMutex.Unlock() + if stub != nil { + fake.SetFinalizersStub(arg1) + } +} + +func (fake *CryptoInstance) SetFinalizersCallCount() int { + fake.setFinalizersMutex.RLock() + defer fake.setFinalizersMutex.RUnlock() + return len(fake.setFinalizersArgsForCall) +} + +func (fake *CryptoInstance) SetFinalizersCalls(stub func([]string)) { + fake.setFinalizersMutex.Lock() + defer fake.setFinalizersMutex.Unlock() + fake.SetFinalizersStub = stub +} + +func (fake *CryptoInstance) SetFinalizersArgsForCall(i int) []string { + fake.setFinalizersMutex.RLock() + defer fake.setFinalizersMutex.RUnlock() + argsForCall := fake.setFinalizersArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *CryptoInstance) SetGenerateName(arg1 string) { + fake.setGenerateNameMutex.Lock() + fake.setGenerateNameArgsForCall = append(fake.setGenerateNameArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetGenerateNameStub + fake.recordInvocation("SetGenerateName", []interface{}{arg1}) + fake.setGenerateNameMutex.Unlock() + if stub != nil { + fake.SetGenerateNameStub(arg1) + } +} + +func (fake *CryptoInstance) SetGenerateNameCallCount() int { + fake.setGenerateNameMutex.RLock() + defer fake.setGenerateNameMutex.RUnlock() + return len(fake.setGenerateNameArgsForCall) +} + +func (fake *CryptoInstance) SetGenerateNameCalls(stub func(string)) { + fake.setGenerateNameMutex.Lock() + defer fake.setGenerateNameMutex.Unlock() + fake.SetGenerateNameStub = stub +} + +func (fake *CryptoInstance) SetGenerateNameArgsForCall(i int) string { + fake.setGenerateNameMutex.RLock() + defer fake.setGenerateNameMutex.RUnlock() + argsForCall := fake.setGenerateNameArgsForCall[i] + return argsForCall.arg1 +} + +func 
(fake *CryptoInstance) SetGeneration(arg1 int64) { + fake.setGenerationMutex.Lock() + fake.setGenerationArgsForCall = append(fake.setGenerationArgsForCall, struct { + arg1 int64 + }{arg1}) + stub := fake.SetGenerationStub + fake.recordInvocation("SetGeneration", []interface{}{arg1}) + fake.setGenerationMutex.Unlock() + if stub != nil { + fake.SetGenerationStub(arg1) + } +} + +func (fake *CryptoInstance) SetGenerationCallCount() int { + fake.setGenerationMutex.RLock() + defer fake.setGenerationMutex.RUnlock() + return len(fake.setGenerationArgsForCall) +} + +func (fake *CryptoInstance) SetGenerationCalls(stub func(int64)) { + fake.setGenerationMutex.Lock() + defer fake.setGenerationMutex.Unlock() + fake.SetGenerationStub = stub +} + +func (fake *CryptoInstance) SetGenerationArgsForCall(i int) int64 { + fake.setGenerationMutex.RLock() + defer fake.setGenerationMutex.RUnlock() + argsForCall := fake.setGenerationArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *CryptoInstance) SetLabels(arg1 map[string]string) { + fake.setLabelsMutex.Lock() + fake.setLabelsArgsForCall = append(fake.setLabelsArgsForCall, struct { + arg1 map[string]string + }{arg1}) + stub := fake.SetLabelsStub + fake.recordInvocation("SetLabels", []interface{}{arg1}) + fake.setLabelsMutex.Unlock() + if stub != nil { + fake.SetLabelsStub(arg1) + } +} + +func (fake *CryptoInstance) SetLabelsCallCount() int { + fake.setLabelsMutex.RLock() + defer fake.setLabelsMutex.RUnlock() + return len(fake.setLabelsArgsForCall) +} + +func (fake *CryptoInstance) SetLabelsCalls(stub func(map[string]string)) { + fake.setLabelsMutex.Lock() + defer fake.setLabelsMutex.Unlock() + fake.SetLabelsStub = stub +} + +func (fake *CryptoInstance) SetLabelsArgsForCall(i int) map[string]string { + fake.setLabelsMutex.RLock() + defer fake.setLabelsMutex.RUnlock() + argsForCall := fake.setLabelsArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *CryptoInstance) SetManagedFields(arg1 []v1.ManagedFieldsEntry) { + var arg1Copy []v1.ManagedFieldsEntry + if arg1 != nil { + arg1Copy = make([]v1.ManagedFieldsEntry, len(arg1)) + copy(arg1Copy, arg1) + } + fake.setManagedFieldsMutex.Lock() + fake.setManagedFieldsArgsForCall = append(fake.setManagedFieldsArgsForCall, struct { + arg1 []v1.ManagedFieldsEntry + }{arg1Copy}) + stub := fake.SetManagedFieldsStub + fake.recordInvocation("SetManagedFields", []interface{}{arg1Copy}) + fake.setManagedFieldsMutex.Unlock() + if stub != nil { + fake.SetManagedFieldsStub(arg1) + } +} + +func (fake *CryptoInstance) SetManagedFieldsCallCount() int { + fake.setManagedFieldsMutex.RLock() + defer fake.setManagedFieldsMutex.RUnlock() + return len(fake.setManagedFieldsArgsForCall) +} + +func (fake *CryptoInstance) SetManagedFieldsCalls(stub func([]v1.ManagedFieldsEntry)) { + fake.setManagedFieldsMutex.Lock() + defer fake.setManagedFieldsMutex.Unlock() + fake.SetManagedFieldsStub = stub +} + +func (fake *CryptoInstance) SetManagedFieldsArgsForCall(i int) []v1.ManagedFieldsEntry { + fake.setManagedFieldsMutex.RLock() + defer fake.setManagedFieldsMutex.RUnlock() + argsForCall := fake.setManagedFieldsArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *CryptoInstance) SetName(arg1 string) { + fake.setNameMutex.Lock() + fake.setNameArgsForCall = append(fake.setNameArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetNameStub + fake.recordInvocation("SetName", []interface{}{arg1}) + fake.setNameMutex.Unlock() + if stub != nil { + fake.SetNameStub(arg1) + } +} + +func (fake *CryptoInstance) SetNameCallCount() int { 
+ fake.setNameMutex.RLock() + defer fake.setNameMutex.RUnlock() + return len(fake.setNameArgsForCall) +} + +func (fake *CryptoInstance) SetNameCalls(stub func(string)) { + fake.setNameMutex.Lock() + defer fake.setNameMutex.Unlock() + fake.SetNameStub = stub +} + +func (fake *CryptoInstance) SetNameArgsForCall(i int) string { + fake.setNameMutex.RLock() + defer fake.setNameMutex.RUnlock() + argsForCall := fake.setNameArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *CryptoInstance) SetNamespace(arg1 string) { + fake.setNamespaceMutex.Lock() + fake.setNamespaceArgsForCall = append(fake.setNamespaceArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetNamespaceStub + fake.recordInvocation("SetNamespace", []interface{}{arg1}) + fake.setNamespaceMutex.Unlock() + if stub != nil { + fake.SetNamespaceStub(arg1) + } +} + +func (fake *CryptoInstance) SetNamespaceCallCount() int { + fake.setNamespaceMutex.RLock() + defer fake.setNamespaceMutex.RUnlock() + return len(fake.setNamespaceArgsForCall) +} + +func (fake *CryptoInstance) SetNamespaceCalls(stub func(string)) { + fake.setNamespaceMutex.Lock() + defer fake.setNamespaceMutex.Unlock() + fake.SetNamespaceStub = stub +} + +func (fake *CryptoInstance) SetNamespaceArgsForCall(i int) string { + fake.setNamespaceMutex.RLock() + defer fake.setNamespaceMutex.RUnlock() + argsForCall := fake.setNamespaceArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *CryptoInstance) SetOwnerReferences(arg1 []v1.OwnerReference) { + var arg1Copy []v1.OwnerReference + if arg1 != nil { + arg1Copy = make([]v1.OwnerReference, len(arg1)) + copy(arg1Copy, arg1) + } + fake.setOwnerReferencesMutex.Lock() + fake.setOwnerReferencesArgsForCall = append(fake.setOwnerReferencesArgsForCall, struct { + arg1 []v1.OwnerReference + }{arg1Copy}) + stub := fake.SetOwnerReferencesStub + fake.recordInvocation("SetOwnerReferences", []interface{}{arg1Copy}) + fake.setOwnerReferencesMutex.Unlock() + if stub != nil { + fake.SetOwnerReferencesStub(arg1) + } +} + +func (fake *CryptoInstance) SetOwnerReferencesCallCount() int { + fake.setOwnerReferencesMutex.RLock() + defer fake.setOwnerReferencesMutex.RUnlock() + return len(fake.setOwnerReferencesArgsForCall) +} + +func (fake *CryptoInstance) SetOwnerReferencesCalls(stub func([]v1.OwnerReference)) { + fake.setOwnerReferencesMutex.Lock() + defer fake.setOwnerReferencesMutex.Unlock() + fake.SetOwnerReferencesStub = stub +} + +func (fake *CryptoInstance) SetOwnerReferencesArgsForCall(i int) []v1.OwnerReference { + fake.setOwnerReferencesMutex.RLock() + defer fake.setOwnerReferencesMutex.RUnlock() + argsForCall := fake.setOwnerReferencesArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *CryptoInstance) SetResourceVersion(arg1 string) { + fake.setResourceVersionMutex.Lock() + fake.setResourceVersionArgsForCall = append(fake.setResourceVersionArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetResourceVersionStub + fake.recordInvocation("SetResourceVersion", []interface{}{arg1}) + fake.setResourceVersionMutex.Unlock() + if stub != nil { + fake.SetResourceVersionStub(arg1) + } +} + +func (fake *CryptoInstance) SetResourceVersionCallCount() int { + fake.setResourceVersionMutex.RLock() + defer fake.setResourceVersionMutex.RUnlock() + return len(fake.setResourceVersionArgsForCall) +} + +func (fake *CryptoInstance) SetResourceVersionCalls(stub func(string)) { + fake.setResourceVersionMutex.Lock() + defer fake.setResourceVersionMutex.Unlock() + fake.SetResourceVersionStub = stub +} + +func (fake *CryptoInstance) 
SetResourceVersionArgsForCall(i int) string { + fake.setResourceVersionMutex.RLock() + defer fake.setResourceVersionMutex.RUnlock() + argsForCall := fake.setResourceVersionArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *CryptoInstance) SetSelfLink(arg1 string) { + fake.setSelfLinkMutex.Lock() + fake.setSelfLinkArgsForCall = append(fake.setSelfLinkArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetSelfLinkStub + fake.recordInvocation("SetSelfLink", []interface{}{arg1}) + fake.setSelfLinkMutex.Unlock() + if stub != nil { + fake.SetSelfLinkStub(arg1) + } +} + +func (fake *CryptoInstance) SetSelfLinkCallCount() int { + fake.setSelfLinkMutex.RLock() + defer fake.setSelfLinkMutex.RUnlock() + return len(fake.setSelfLinkArgsForCall) +} + +func (fake *CryptoInstance) SetSelfLinkCalls(stub func(string)) { + fake.setSelfLinkMutex.Lock() + defer fake.setSelfLinkMutex.Unlock() + fake.SetSelfLinkStub = stub +} + +func (fake *CryptoInstance) SetSelfLinkArgsForCall(i int) string { + fake.setSelfLinkMutex.RLock() + defer fake.setSelfLinkMutex.RUnlock() + argsForCall := fake.setSelfLinkArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *CryptoInstance) SetUID(arg1 types.UID) { + fake.setUIDMutex.Lock() + fake.setUIDArgsForCall = append(fake.setUIDArgsForCall, struct { + arg1 types.UID + }{arg1}) + stub := fake.SetUIDStub + fake.recordInvocation("SetUID", []interface{}{arg1}) + fake.setUIDMutex.Unlock() + if stub != nil { + fake.SetUIDStub(arg1) + } +} + +func (fake *CryptoInstance) SetUIDCallCount() int { + fake.setUIDMutex.RLock() + defer fake.setUIDMutex.RUnlock() + return len(fake.setUIDArgsForCall) +} + +func (fake *CryptoInstance) SetUIDCalls(stub func(types.UID)) { + fake.setUIDMutex.Lock() + defer fake.setUIDMutex.Unlock() + fake.SetUIDStub = stub +} + +func (fake *CryptoInstance) SetUIDArgsForCall(i int) types.UID { + fake.setUIDMutex.RLock() + defer fake.setUIDMutex.RUnlock() + argsForCall := fake.setUIDArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *CryptoInstance) UsingHSMProxy() bool { + fake.usingHSMProxyMutex.Lock() + ret, specificReturn := fake.usingHSMProxyReturnsOnCall[len(fake.usingHSMProxyArgsForCall)] + fake.usingHSMProxyArgsForCall = append(fake.usingHSMProxyArgsForCall, struct { + }{}) + stub := fake.UsingHSMProxyStub + fakeReturns := fake.usingHSMProxyReturns + fake.recordInvocation("UsingHSMProxy", []interface{}{}) + fake.usingHSMProxyMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *CryptoInstance) UsingHSMProxyCallCount() int { + fake.usingHSMProxyMutex.RLock() + defer fake.usingHSMProxyMutex.RUnlock() + return len(fake.usingHSMProxyArgsForCall) +} + +func (fake *CryptoInstance) UsingHSMProxyCalls(stub func() bool) { + fake.usingHSMProxyMutex.Lock() + defer fake.usingHSMProxyMutex.Unlock() + fake.UsingHSMProxyStub = stub +} + +func (fake *CryptoInstance) UsingHSMProxyReturns(result1 bool) { + fake.usingHSMProxyMutex.Lock() + defer fake.usingHSMProxyMutex.Unlock() + fake.UsingHSMProxyStub = nil + fake.usingHSMProxyReturns = struct { + result1 bool + }{result1} +} + +func (fake *CryptoInstance) UsingHSMProxyReturnsOnCall(i int, result1 bool) { + fake.usingHSMProxyMutex.Lock() + defer fake.usingHSMProxyMutex.Unlock() + fake.UsingHSMProxyStub = nil + if fake.usingHSMProxyReturnsOnCall == nil { + fake.usingHSMProxyReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.usingHSMProxyReturnsOnCall[i] = struct { + result1 bool + 
}{result1} +} + +func (fake *CryptoInstance) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.deepCopyObjectMutex.RLock() + defer fake.deepCopyObjectMutex.RUnlock() + fake.enrollerImageMutex.RLock() + defer fake.enrollerImageMutex.RUnlock() + fake.getAnnotationsMutex.RLock() + defer fake.getAnnotationsMutex.RUnlock() + fake.getClusterNameMutex.RLock() + defer fake.getClusterNameMutex.RUnlock() + fake.getConfigOverrideMutex.RLock() + defer fake.getConfigOverrideMutex.RUnlock() + fake.getCreationTimestampMutex.RLock() + defer fake.getCreationTimestampMutex.RUnlock() + fake.getDeletionGracePeriodSecondsMutex.RLock() + defer fake.getDeletionGracePeriodSecondsMutex.RUnlock() + fake.getDeletionTimestampMutex.RLock() + defer fake.getDeletionTimestampMutex.RUnlock() + fake.getFinalizersMutex.RLock() + defer fake.getFinalizersMutex.RUnlock() + fake.getGenerateNameMutex.RLock() + defer fake.getGenerateNameMutex.RUnlock() + fake.getGenerationMutex.RLock() + defer fake.getGenerationMutex.RUnlock() + fake.getLabelsMutex.RLock() + defer fake.getLabelsMutex.RUnlock() + fake.getManagedFieldsMutex.RLock() + defer fake.getManagedFieldsMutex.RUnlock() + fake.getNameMutex.RLock() + defer fake.getNameMutex.RUnlock() + fake.getNamespaceMutex.RLock() + defer fake.getNamespaceMutex.RUnlock() + fake.getObjectKindMutex.RLock() + defer fake.getObjectKindMutex.RUnlock() + fake.getOwnerReferencesMutex.RLock() + defer fake.getOwnerReferencesMutex.RUnlock() + fake.getPullSecretsMutex.RLock() + defer fake.getPullSecretsMutex.RUnlock() + fake.getResourceMutex.RLock() + defer fake.getResourceMutex.RUnlock() + fake.getResourceVersionMutex.RLock() + defer fake.getResourceVersionMutex.RUnlock() + fake.getSelfLinkMutex.RLock() + defer fake.getSelfLinkMutex.RUnlock() + fake.getUIDMutex.RLock() + defer fake.getUIDMutex.RUnlock() + fake.isHSMEnabledMutex.RLock() + defer fake.isHSMEnabledMutex.RUnlock() + fake.pVCNameMutex.RLock() + defer fake.pVCNameMutex.RUnlock() + fake.setAnnotationsMutex.RLock() + defer fake.setAnnotationsMutex.RUnlock() + fake.setClusterNameMutex.RLock() + defer fake.setClusterNameMutex.RUnlock() + fake.setCreationTimestampMutex.RLock() + defer fake.setCreationTimestampMutex.RUnlock() + fake.setDeletionGracePeriodSecondsMutex.RLock() + defer fake.setDeletionGracePeriodSecondsMutex.RUnlock() + fake.setDeletionTimestampMutex.RLock() + defer fake.setDeletionTimestampMutex.RUnlock() + fake.setFinalizersMutex.RLock() + defer fake.setFinalizersMutex.RUnlock() + fake.setGenerateNameMutex.RLock() + defer fake.setGenerateNameMutex.RUnlock() + fake.setGenerationMutex.RLock() + defer fake.setGenerationMutex.RUnlock() + fake.setLabelsMutex.RLock() + defer fake.setLabelsMutex.RUnlock() + fake.setManagedFieldsMutex.RLock() + defer fake.setManagedFieldsMutex.RUnlock() + fake.setNameMutex.RLock() + defer fake.setNameMutex.RUnlock() + fake.setNamespaceMutex.RLock() + defer fake.setNamespaceMutex.RUnlock() + fake.setOwnerReferencesMutex.RLock() + defer fake.setOwnerReferencesMutex.RUnlock() + fake.setResourceVersionMutex.RLock() + defer fake.setResourceVersionMutex.RUnlock() + fake.setSelfLinkMutex.RLock() + defer fake.setSelfLinkMutex.RUnlock() + fake.setUIDMutex.RLock() + defer fake.setUIDMutex.RUnlock() + fake.usingHSMProxyMutex.RLock() + defer fake.usingHSMProxyMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func 
(fake *CryptoInstance) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ enroller.CryptoInstance = new(CryptoInstance) diff --git a/pkg/initializer/common/enroller/mocks/hsmcaclient.go b/pkg/initializer/common/enroller/mocks/hsmcaclient.go new file mode 100644 index 00000000..c165c295 --- /dev/null +++ b/pkg/initializer/common/enroller/mocks/hsmcaclient.go @@ -0,0 +1,348 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package mocks + +import ( + "sync" + "time" + + "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/enroller" + "github.com/hyperledger/fabric-ca/lib" +) + +type HSMCAClient struct { + GetConfigStub func() *lib.ClientConfig + getConfigMutex sync.RWMutex + getConfigArgsForCall []struct { + } + getConfigReturns struct { + result1 *lib.ClientConfig + } + getConfigReturnsOnCall map[int]struct { + result1 *lib.ClientConfig + } + GetEnrollmentRequestStub func() *v1beta1.Enrollment + getEnrollmentRequestMutex sync.RWMutex + getEnrollmentRequestArgsForCall []struct { + } + getEnrollmentRequestReturns struct { + result1 *v1beta1.Enrollment + } + getEnrollmentRequestReturnsOnCall map[int]struct { + result1 *v1beta1.Enrollment + } + GetHomeDirStub func() string + getHomeDirMutex sync.RWMutex + getHomeDirArgsForCall []struct { + } + getHomeDirReturns struct { + result1 string + } + getHomeDirReturnsOnCall map[int]struct { + result1 string + } + PingCAStub func(time.Duration) error + pingCAMutex sync.RWMutex + pingCAArgsForCall []struct { + arg1 time.Duration + } + pingCAReturns struct { + result1 error + } + pingCAReturnsOnCall map[int]struct { + result1 error + } + SetHSMLibraryStub func(string) + setHSMLibraryMutex sync.RWMutex + setHSMLibraryArgsForCall []struct { + arg1 string + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *HSMCAClient) GetConfig() *lib.ClientConfig { + fake.getConfigMutex.Lock() + ret, specificReturn := fake.getConfigReturnsOnCall[len(fake.getConfigArgsForCall)] + fake.getConfigArgsForCall = append(fake.getConfigArgsForCall, struct { + }{}) + stub := fake.GetConfigStub + fakeReturns := fake.getConfigReturns + fake.recordInvocation("GetConfig", []interface{}{}) + fake.getConfigMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *HSMCAClient) GetConfigCallCount() int { + fake.getConfigMutex.RLock() + defer fake.getConfigMutex.RUnlock() + return len(fake.getConfigArgsForCall) +} + +func (fake *HSMCAClient) GetConfigCalls(stub func() *lib.ClientConfig) { + fake.getConfigMutex.Lock() + defer fake.getConfigMutex.Unlock() + fake.GetConfigStub = stub +} + +func (fake *HSMCAClient) GetConfigReturns(result1 *lib.ClientConfig) { + fake.getConfigMutex.Lock() + defer fake.getConfigMutex.Unlock() + fake.GetConfigStub = nil + fake.getConfigReturns = struct { + result1 *lib.ClientConfig + }{result1} +} + +func (fake *HSMCAClient) GetConfigReturnsOnCall(i int, result1 *lib.ClientConfig) { + fake.getConfigMutex.Lock() + defer fake.getConfigMutex.Unlock() + fake.GetConfigStub = nil + if fake.getConfigReturnsOnCall == nil { + fake.getConfigReturnsOnCall = 
make(map[int]struct { + result1 *lib.ClientConfig + }) + } + fake.getConfigReturnsOnCall[i] = struct { + result1 *lib.ClientConfig + }{result1} +} + +func (fake *HSMCAClient) GetEnrollmentRequest() *v1beta1.Enrollment { + fake.getEnrollmentRequestMutex.Lock() + ret, specificReturn := fake.getEnrollmentRequestReturnsOnCall[len(fake.getEnrollmentRequestArgsForCall)] + fake.getEnrollmentRequestArgsForCall = append(fake.getEnrollmentRequestArgsForCall, struct { + }{}) + stub := fake.GetEnrollmentRequestStub + fakeReturns := fake.getEnrollmentRequestReturns + fake.recordInvocation("GetEnrollmentRequest", []interface{}{}) + fake.getEnrollmentRequestMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *HSMCAClient) GetEnrollmentRequestCallCount() int { + fake.getEnrollmentRequestMutex.RLock() + defer fake.getEnrollmentRequestMutex.RUnlock() + return len(fake.getEnrollmentRequestArgsForCall) +} + +func (fake *HSMCAClient) GetEnrollmentRequestCalls(stub func() *v1beta1.Enrollment) { + fake.getEnrollmentRequestMutex.Lock() + defer fake.getEnrollmentRequestMutex.Unlock() + fake.GetEnrollmentRequestStub = stub +} + +func (fake *HSMCAClient) GetEnrollmentRequestReturns(result1 *v1beta1.Enrollment) { + fake.getEnrollmentRequestMutex.Lock() + defer fake.getEnrollmentRequestMutex.Unlock() + fake.GetEnrollmentRequestStub = nil + fake.getEnrollmentRequestReturns = struct { + result1 *v1beta1.Enrollment + }{result1} +} + +func (fake *HSMCAClient) GetEnrollmentRequestReturnsOnCall(i int, result1 *v1beta1.Enrollment) { + fake.getEnrollmentRequestMutex.Lock() + defer fake.getEnrollmentRequestMutex.Unlock() + fake.GetEnrollmentRequestStub = nil + if fake.getEnrollmentRequestReturnsOnCall == nil { + fake.getEnrollmentRequestReturnsOnCall = make(map[int]struct { + result1 *v1beta1.Enrollment + }) + } + fake.getEnrollmentRequestReturnsOnCall[i] = struct { + result1 *v1beta1.Enrollment + }{result1} +} + +func (fake *HSMCAClient) GetHomeDir() string { + fake.getHomeDirMutex.Lock() + ret, specificReturn := fake.getHomeDirReturnsOnCall[len(fake.getHomeDirArgsForCall)] + fake.getHomeDirArgsForCall = append(fake.getHomeDirArgsForCall, struct { + }{}) + stub := fake.GetHomeDirStub + fakeReturns := fake.getHomeDirReturns + fake.recordInvocation("GetHomeDir", []interface{}{}) + fake.getHomeDirMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *HSMCAClient) GetHomeDirCallCount() int { + fake.getHomeDirMutex.RLock() + defer fake.getHomeDirMutex.RUnlock() + return len(fake.getHomeDirArgsForCall) +} + +func (fake *HSMCAClient) GetHomeDirCalls(stub func() string) { + fake.getHomeDirMutex.Lock() + defer fake.getHomeDirMutex.Unlock() + fake.GetHomeDirStub = stub +} + +func (fake *HSMCAClient) GetHomeDirReturns(result1 string) { + fake.getHomeDirMutex.Lock() + defer fake.getHomeDirMutex.Unlock() + fake.GetHomeDirStub = nil + fake.getHomeDirReturns = struct { + result1 string + }{result1} +} + +func (fake *HSMCAClient) GetHomeDirReturnsOnCall(i int, result1 string) { + fake.getHomeDirMutex.Lock() + defer fake.getHomeDirMutex.Unlock() + fake.GetHomeDirStub = nil + if fake.getHomeDirReturnsOnCall == nil { + fake.getHomeDirReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getHomeDirReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *HSMCAClient) PingCA(arg1 time.Duration) error { + fake.pingCAMutex.Lock() 
+ ret, specificReturn := fake.pingCAReturnsOnCall[len(fake.pingCAArgsForCall)] + fake.pingCAArgsForCall = append(fake.pingCAArgsForCall, struct { + arg1 time.Duration + }{arg1}) + stub := fake.PingCAStub + fakeReturns := fake.pingCAReturns + fake.recordInvocation("PingCA", []interface{}{arg1}) + fake.pingCAMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *HSMCAClient) PingCACallCount() int { + fake.pingCAMutex.RLock() + defer fake.pingCAMutex.RUnlock() + return len(fake.pingCAArgsForCall) +} + +func (fake *HSMCAClient) PingCACalls(stub func(time.Duration) error) { + fake.pingCAMutex.Lock() + defer fake.pingCAMutex.Unlock() + fake.PingCAStub = stub +} + +func (fake *HSMCAClient) PingCAArgsForCall(i int) time.Duration { + fake.pingCAMutex.RLock() + defer fake.pingCAMutex.RUnlock() + argsForCall := fake.pingCAArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *HSMCAClient) PingCAReturns(result1 error) { + fake.pingCAMutex.Lock() + defer fake.pingCAMutex.Unlock() + fake.PingCAStub = nil + fake.pingCAReturns = struct { + result1 error + }{result1} +} + +func (fake *HSMCAClient) PingCAReturnsOnCall(i int, result1 error) { + fake.pingCAMutex.Lock() + defer fake.pingCAMutex.Unlock() + fake.PingCAStub = nil + if fake.pingCAReturnsOnCall == nil { + fake.pingCAReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.pingCAReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *HSMCAClient) SetHSMLibrary(arg1 string) { + fake.setHSMLibraryMutex.Lock() + fake.setHSMLibraryArgsForCall = append(fake.setHSMLibraryArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetHSMLibraryStub + fake.recordInvocation("SetHSMLibrary", []interface{}{arg1}) + fake.setHSMLibraryMutex.Unlock() + if stub != nil { + fake.SetHSMLibraryStub(arg1) + } +} + +func (fake *HSMCAClient) SetHSMLibraryCallCount() int { + fake.setHSMLibraryMutex.RLock() + defer fake.setHSMLibraryMutex.RUnlock() + return len(fake.setHSMLibraryArgsForCall) +} + +func (fake *HSMCAClient) SetHSMLibraryCalls(stub func(string)) { + fake.setHSMLibraryMutex.Lock() + defer fake.setHSMLibraryMutex.Unlock() + fake.SetHSMLibraryStub = stub +} + +func (fake *HSMCAClient) SetHSMLibraryArgsForCall(i int) string { + fake.setHSMLibraryMutex.RLock() + defer fake.setHSMLibraryMutex.RUnlock() + argsForCall := fake.setHSMLibraryArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *HSMCAClient) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.getConfigMutex.RLock() + defer fake.getConfigMutex.RUnlock() + fake.getEnrollmentRequestMutex.RLock() + defer fake.getEnrollmentRequestMutex.RUnlock() + fake.getHomeDirMutex.RLock() + defer fake.getHomeDirMutex.RUnlock() + fake.pingCAMutex.RLock() + defer fake.pingCAMutex.RUnlock() + fake.setHSMLibraryMutex.RLock() + defer fake.setHSMLibraryMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *HSMCAClient) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ 
enroller.HSMCAClient = new(HSMCAClient) diff --git a/pkg/initializer/common/enroller/mocks/instance.go b/pkg/initializer/common/enroller/mocks/instance.go new file mode 100644 index 00000000..50347060 --- /dev/null +++ b/pkg/initializer/common/enroller/mocks/instance.go @@ -0,0 +1,1989 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package mocks + +import ( + "sync" + + "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/enroller" + v1a "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +type Instance struct { + EnrollerImageStub func() string + enrollerImageMutex sync.RWMutex + enrollerImageArgsForCall []struct { + } + enrollerImageReturns struct { + result1 string + } + enrollerImageReturnsOnCall map[int]struct { + result1 string + } + GetAnnotationsStub func() map[string]string + getAnnotationsMutex sync.RWMutex + getAnnotationsArgsForCall []struct { + } + getAnnotationsReturns struct { + result1 map[string]string + } + getAnnotationsReturnsOnCall map[int]struct { + result1 map[string]string + } + GetClusterNameStub func() string + getClusterNameMutex sync.RWMutex + getClusterNameArgsForCall []struct { + } + getClusterNameReturns struct { + result1 string + } + getClusterNameReturnsOnCall map[int]struct { + result1 string + } + GetCreationTimestampStub func() v1.Time + getCreationTimestampMutex sync.RWMutex + getCreationTimestampArgsForCall []struct { + } + getCreationTimestampReturns struct { + result1 v1.Time + } + getCreationTimestampReturnsOnCall map[int]struct { + result1 v1.Time + } + GetDeletionGracePeriodSecondsStub func() *int64 + getDeletionGracePeriodSecondsMutex sync.RWMutex + getDeletionGracePeriodSecondsArgsForCall []struct { + } + getDeletionGracePeriodSecondsReturns struct { + result1 *int64 + } + getDeletionGracePeriodSecondsReturnsOnCall map[int]struct { + result1 *int64 + } + GetDeletionTimestampStub func() *v1.Time + getDeletionTimestampMutex sync.RWMutex + getDeletionTimestampArgsForCall []struct { + } + getDeletionTimestampReturns struct { + result1 *v1.Time + } + getDeletionTimestampReturnsOnCall map[int]struct { + result1 *v1.Time + } + GetFinalizersStub func() []string + getFinalizersMutex sync.RWMutex + getFinalizersArgsForCall []struct { + } + getFinalizersReturns struct { + result1 []string + } + getFinalizersReturnsOnCall map[int]struct { + result1 []string + } + GetGenerateNameStub func() string + getGenerateNameMutex sync.RWMutex + getGenerateNameArgsForCall []struct { + } + getGenerateNameReturns struct { + result1 string + } + getGenerateNameReturnsOnCall map[int]struct { + result1 string + } + GetGenerationStub func() int64 + getGenerationMutex sync.RWMutex + getGenerationArgsForCall []struct { + } + getGenerationReturns struct { + result1 int64 + } + getGenerationReturnsOnCall map[int]struct { + result1 int64 + } + GetLabelsStub func() map[string]string + getLabelsMutex sync.RWMutex + getLabelsArgsForCall []struct { + } + getLabelsReturns struct { + result1 map[string]string + } + getLabelsReturnsOnCall map[int]struct { + result1 map[string]string + } + GetManagedFieldsStub func() []v1.ManagedFieldsEntry + getManagedFieldsMutex sync.RWMutex + getManagedFieldsArgsForCall []struct { + } + getManagedFieldsReturns struct { + result1 []v1.ManagedFieldsEntry + } + getManagedFieldsReturnsOnCall map[int]struct { + result1 []v1.ManagedFieldsEntry + } + GetNameStub func() string + getNameMutex sync.RWMutex + getNameArgsForCall 
[]struct { + } + getNameReturns struct { + result1 string + } + getNameReturnsOnCall map[int]struct { + result1 string + } + GetNamespaceStub func() string + getNamespaceMutex sync.RWMutex + getNamespaceArgsForCall []struct { + } + getNamespaceReturns struct { + result1 string + } + getNamespaceReturnsOnCall map[int]struct { + result1 string + } + GetOwnerReferencesStub func() []v1.OwnerReference + getOwnerReferencesMutex sync.RWMutex + getOwnerReferencesArgsForCall []struct { + } + getOwnerReferencesReturns struct { + result1 []v1.OwnerReference + } + getOwnerReferencesReturnsOnCall map[int]struct { + result1 []v1.OwnerReference + } + GetPullSecretsStub func() []v1a.LocalObjectReference + getPullSecretsMutex sync.RWMutex + getPullSecretsArgsForCall []struct { + } + getPullSecretsReturns struct { + result1 []v1a.LocalObjectReference + } + getPullSecretsReturnsOnCall map[int]struct { + result1 []v1a.LocalObjectReference + } + GetResourceStub func(v1beta1.Component) v1a.ResourceRequirements + getResourceMutex sync.RWMutex + getResourceArgsForCall []struct { + arg1 v1beta1.Component + } + getResourceReturns struct { + result1 v1a.ResourceRequirements + } + getResourceReturnsOnCall map[int]struct { + result1 v1a.ResourceRequirements + } + GetResourceVersionStub func() string + getResourceVersionMutex sync.RWMutex + getResourceVersionArgsForCall []struct { + } + getResourceVersionReturns struct { + result1 string + } + getResourceVersionReturnsOnCall map[int]struct { + result1 string + } + GetSelfLinkStub func() string + getSelfLinkMutex sync.RWMutex + getSelfLinkArgsForCall []struct { + } + getSelfLinkReturns struct { + result1 string + } + getSelfLinkReturnsOnCall map[int]struct { + result1 string + } + GetUIDStub func() types.UID + getUIDMutex sync.RWMutex + getUIDArgsForCall []struct { + } + getUIDReturns struct { + result1 types.UID + } + getUIDReturnsOnCall map[int]struct { + result1 types.UID + } + PVCNameStub func() string + pVCNameMutex sync.RWMutex + pVCNameArgsForCall []struct { + } + pVCNameReturns struct { + result1 string + } + pVCNameReturnsOnCall map[int]struct { + result1 string + } + SetAnnotationsStub func(map[string]string) + setAnnotationsMutex sync.RWMutex + setAnnotationsArgsForCall []struct { + arg1 map[string]string + } + SetClusterNameStub func(string) + setClusterNameMutex sync.RWMutex + setClusterNameArgsForCall []struct { + arg1 string + } + SetCreationTimestampStub func(v1.Time) + setCreationTimestampMutex sync.RWMutex + setCreationTimestampArgsForCall []struct { + arg1 v1.Time + } + SetDeletionGracePeriodSecondsStub func(*int64) + setDeletionGracePeriodSecondsMutex sync.RWMutex + setDeletionGracePeriodSecondsArgsForCall []struct { + arg1 *int64 + } + SetDeletionTimestampStub func(*v1.Time) + setDeletionTimestampMutex sync.RWMutex + setDeletionTimestampArgsForCall []struct { + arg1 *v1.Time + } + SetFinalizersStub func([]string) + setFinalizersMutex sync.RWMutex + setFinalizersArgsForCall []struct { + arg1 []string + } + SetGenerateNameStub func(string) + setGenerateNameMutex sync.RWMutex + setGenerateNameArgsForCall []struct { + arg1 string + } + SetGenerationStub func(int64) + setGenerationMutex sync.RWMutex + setGenerationArgsForCall []struct { + arg1 int64 + } + SetLabelsStub func(map[string]string) + setLabelsMutex sync.RWMutex + setLabelsArgsForCall []struct { + arg1 map[string]string + } + SetManagedFieldsStub func([]v1.ManagedFieldsEntry) + setManagedFieldsMutex sync.RWMutex + setManagedFieldsArgsForCall []struct { + arg1 []v1.ManagedFieldsEntry + } + 
SetNameStub func(string) + setNameMutex sync.RWMutex + setNameArgsForCall []struct { + arg1 string + } + SetNamespaceStub func(string) + setNamespaceMutex sync.RWMutex + setNamespaceArgsForCall []struct { + arg1 string + } + SetOwnerReferencesStub func([]v1.OwnerReference) + setOwnerReferencesMutex sync.RWMutex + setOwnerReferencesArgsForCall []struct { + arg1 []v1.OwnerReference + } + SetResourceVersionStub func(string) + setResourceVersionMutex sync.RWMutex + setResourceVersionArgsForCall []struct { + arg1 string + } + SetSelfLinkStub func(string) + setSelfLinkMutex sync.RWMutex + setSelfLinkArgsForCall []struct { + arg1 string + } + SetUIDStub func(types.UID) + setUIDMutex sync.RWMutex + setUIDArgsForCall []struct { + arg1 types.UID + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *Instance) EnrollerImage() string { + fake.enrollerImageMutex.Lock() + ret, specificReturn := fake.enrollerImageReturnsOnCall[len(fake.enrollerImageArgsForCall)] + fake.enrollerImageArgsForCall = append(fake.enrollerImageArgsForCall, struct { + }{}) + stub := fake.EnrollerImageStub + fakeReturns := fake.enrollerImageReturns + fake.recordInvocation("EnrollerImage", []interface{}{}) + fake.enrollerImageMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) EnrollerImageCallCount() int { + fake.enrollerImageMutex.RLock() + defer fake.enrollerImageMutex.RUnlock() + return len(fake.enrollerImageArgsForCall) +} + +func (fake *Instance) EnrollerImageCalls(stub func() string) { + fake.enrollerImageMutex.Lock() + defer fake.enrollerImageMutex.Unlock() + fake.EnrollerImageStub = stub +} + +func (fake *Instance) EnrollerImageReturns(result1 string) { + fake.enrollerImageMutex.Lock() + defer fake.enrollerImageMutex.Unlock() + fake.EnrollerImageStub = nil + fake.enrollerImageReturns = struct { + result1 string + }{result1} +} + +func (fake *Instance) EnrollerImageReturnsOnCall(i int, result1 string) { + fake.enrollerImageMutex.Lock() + defer fake.enrollerImageMutex.Unlock() + fake.EnrollerImageStub = nil + if fake.enrollerImageReturnsOnCall == nil { + fake.enrollerImageReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.enrollerImageReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *Instance) GetAnnotations() map[string]string { + fake.getAnnotationsMutex.Lock() + ret, specificReturn := fake.getAnnotationsReturnsOnCall[len(fake.getAnnotationsArgsForCall)] + fake.getAnnotationsArgsForCall = append(fake.getAnnotationsArgsForCall, struct { + }{}) + stub := fake.GetAnnotationsStub + fakeReturns := fake.getAnnotationsReturns + fake.recordInvocation("GetAnnotations", []interface{}{}) + fake.getAnnotationsMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetAnnotationsCallCount() int { + fake.getAnnotationsMutex.RLock() + defer fake.getAnnotationsMutex.RUnlock() + return len(fake.getAnnotationsArgsForCall) +} + +func (fake *Instance) GetAnnotationsCalls(stub func() map[string]string) { + fake.getAnnotationsMutex.Lock() + defer fake.getAnnotationsMutex.Unlock() + fake.GetAnnotationsStub = stub +} + +func (fake *Instance) GetAnnotationsReturns(result1 map[string]string) { + fake.getAnnotationsMutex.Lock() + defer fake.getAnnotationsMutex.Unlock() + fake.GetAnnotationsStub = nil + fake.getAnnotationsReturns = struct { + result1 
map[string]string + }{result1} +} + +func (fake *Instance) GetAnnotationsReturnsOnCall(i int, result1 map[string]string) { + fake.getAnnotationsMutex.Lock() + defer fake.getAnnotationsMutex.Unlock() + fake.GetAnnotationsStub = nil + if fake.getAnnotationsReturnsOnCall == nil { + fake.getAnnotationsReturnsOnCall = make(map[int]struct { + result1 map[string]string + }) + } + fake.getAnnotationsReturnsOnCall[i] = struct { + result1 map[string]string + }{result1} +} + +func (fake *Instance) GetClusterName() string { + fake.getClusterNameMutex.Lock() + ret, specificReturn := fake.getClusterNameReturnsOnCall[len(fake.getClusterNameArgsForCall)] + fake.getClusterNameArgsForCall = append(fake.getClusterNameArgsForCall, struct { + }{}) + stub := fake.GetClusterNameStub + fakeReturns := fake.getClusterNameReturns + fake.recordInvocation("GetClusterName", []interface{}{}) + fake.getClusterNameMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetClusterNameCallCount() int { + fake.getClusterNameMutex.RLock() + defer fake.getClusterNameMutex.RUnlock() + return len(fake.getClusterNameArgsForCall) +} + +func (fake *Instance) GetClusterNameCalls(stub func() string) { + fake.getClusterNameMutex.Lock() + defer fake.getClusterNameMutex.Unlock() + fake.GetClusterNameStub = stub +} + +func (fake *Instance) GetClusterNameReturns(result1 string) { + fake.getClusterNameMutex.Lock() + defer fake.getClusterNameMutex.Unlock() + fake.GetClusterNameStub = nil + fake.getClusterNameReturns = struct { + result1 string + }{result1} +} + +func (fake *Instance) GetClusterNameReturnsOnCall(i int, result1 string) { + fake.getClusterNameMutex.Lock() + defer fake.getClusterNameMutex.Unlock() + fake.GetClusterNameStub = nil + if fake.getClusterNameReturnsOnCall == nil { + fake.getClusterNameReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getClusterNameReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *Instance) GetCreationTimestamp() v1.Time { + fake.getCreationTimestampMutex.Lock() + ret, specificReturn := fake.getCreationTimestampReturnsOnCall[len(fake.getCreationTimestampArgsForCall)] + fake.getCreationTimestampArgsForCall = append(fake.getCreationTimestampArgsForCall, struct { + }{}) + stub := fake.GetCreationTimestampStub + fakeReturns := fake.getCreationTimestampReturns + fake.recordInvocation("GetCreationTimestamp", []interface{}{}) + fake.getCreationTimestampMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetCreationTimestampCallCount() int { + fake.getCreationTimestampMutex.RLock() + defer fake.getCreationTimestampMutex.RUnlock() + return len(fake.getCreationTimestampArgsForCall) +} + +func (fake *Instance) GetCreationTimestampCalls(stub func() v1.Time) { + fake.getCreationTimestampMutex.Lock() + defer fake.getCreationTimestampMutex.Unlock() + fake.GetCreationTimestampStub = stub +} + +func (fake *Instance) GetCreationTimestampReturns(result1 v1.Time) { + fake.getCreationTimestampMutex.Lock() + defer fake.getCreationTimestampMutex.Unlock() + fake.GetCreationTimestampStub = nil + fake.getCreationTimestampReturns = struct { + result1 v1.Time + }{result1} +} + +func (fake *Instance) GetCreationTimestampReturnsOnCall(i int, result1 v1.Time) { + fake.getCreationTimestampMutex.Lock() + defer fake.getCreationTimestampMutex.Unlock() + fake.GetCreationTimestampStub = 
nil + if fake.getCreationTimestampReturnsOnCall == nil { + fake.getCreationTimestampReturnsOnCall = make(map[int]struct { + result1 v1.Time + }) + } + fake.getCreationTimestampReturnsOnCall[i] = struct { + result1 v1.Time + }{result1} +} + +func (fake *Instance) GetDeletionGracePeriodSeconds() *int64 { + fake.getDeletionGracePeriodSecondsMutex.Lock() + ret, specificReturn := fake.getDeletionGracePeriodSecondsReturnsOnCall[len(fake.getDeletionGracePeriodSecondsArgsForCall)] + fake.getDeletionGracePeriodSecondsArgsForCall = append(fake.getDeletionGracePeriodSecondsArgsForCall, struct { + }{}) + stub := fake.GetDeletionGracePeriodSecondsStub + fakeReturns := fake.getDeletionGracePeriodSecondsReturns + fake.recordInvocation("GetDeletionGracePeriodSeconds", []interface{}{}) + fake.getDeletionGracePeriodSecondsMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetDeletionGracePeriodSecondsCallCount() int { + fake.getDeletionGracePeriodSecondsMutex.RLock() + defer fake.getDeletionGracePeriodSecondsMutex.RUnlock() + return len(fake.getDeletionGracePeriodSecondsArgsForCall) +} + +func (fake *Instance) GetDeletionGracePeriodSecondsCalls(stub func() *int64) { + fake.getDeletionGracePeriodSecondsMutex.Lock() + defer fake.getDeletionGracePeriodSecondsMutex.Unlock() + fake.GetDeletionGracePeriodSecondsStub = stub +} + +func (fake *Instance) GetDeletionGracePeriodSecondsReturns(result1 *int64) { + fake.getDeletionGracePeriodSecondsMutex.Lock() + defer fake.getDeletionGracePeriodSecondsMutex.Unlock() + fake.GetDeletionGracePeriodSecondsStub = nil + fake.getDeletionGracePeriodSecondsReturns = struct { + result1 *int64 + }{result1} +} + +func (fake *Instance) GetDeletionGracePeriodSecondsReturnsOnCall(i int, result1 *int64) { + fake.getDeletionGracePeriodSecondsMutex.Lock() + defer fake.getDeletionGracePeriodSecondsMutex.Unlock() + fake.GetDeletionGracePeriodSecondsStub = nil + if fake.getDeletionGracePeriodSecondsReturnsOnCall == nil { + fake.getDeletionGracePeriodSecondsReturnsOnCall = make(map[int]struct { + result1 *int64 + }) + } + fake.getDeletionGracePeriodSecondsReturnsOnCall[i] = struct { + result1 *int64 + }{result1} +} + +func (fake *Instance) GetDeletionTimestamp() *v1.Time { + fake.getDeletionTimestampMutex.Lock() + ret, specificReturn := fake.getDeletionTimestampReturnsOnCall[len(fake.getDeletionTimestampArgsForCall)] + fake.getDeletionTimestampArgsForCall = append(fake.getDeletionTimestampArgsForCall, struct { + }{}) + stub := fake.GetDeletionTimestampStub + fakeReturns := fake.getDeletionTimestampReturns + fake.recordInvocation("GetDeletionTimestamp", []interface{}{}) + fake.getDeletionTimestampMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetDeletionTimestampCallCount() int { + fake.getDeletionTimestampMutex.RLock() + defer fake.getDeletionTimestampMutex.RUnlock() + return len(fake.getDeletionTimestampArgsForCall) +} + +func (fake *Instance) GetDeletionTimestampCalls(stub func() *v1.Time) { + fake.getDeletionTimestampMutex.Lock() + defer fake.getDeletionTimestampMutex.Unlock() + fake.GetDeletionTimestampStub = stub +} + +func (fake *Instance) GetDeletionTimestampReturns(result1 *v1.Time) { + fake.getDeletionTimestampMutex.Lock() + defer fake.getDeletionTimestampMutex.Unlock() + fake.GetDeletionTimestampStub = nil + fake.getDeletionTimestampReturns = struct { + result1 
*v1.Time + }{result1} +} + +func (fake *Instance) GetDeletionTimestampReturnsOnCall(i int, result1 *v1.Time) { + fake.getDeletionTimestampMutex.Lock() + defer fake.getDeletionTimestampMutex.Unlock() + fake.GetDeletionTimestampStub = nil + if fake.getDeletionTimestampReturnsOnCall == nil { + fake.getDeletionTimestampReturnsOnCall = make(map[int]struct { + result1 *v1.Time + }) + } + fake.getDeletionTimestampReturnsOnCall[i] = struct { + result1 *v1.Time + }{result1} +} + +func (fake *Instance) GetFinalizers() []string { + fake.getFinalizersMutex.Lock() + ret, specificReturn := fake.getFinalizersReturnsOnCall[len(fake.getFinalizersArgsForCall)] + fake.getFinalizersArgsForCall = append(fake.getFinalizersArgsForCall, struct { + }{}) + stub := fake.GetFinalizersStub + fakeReturns := fake.getFinalizersReturns + fake.recordInvocation("GetFinalizers", []interface{}{}) + fake.getFinalizersMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetFinalizersCallCount() int { + fake.getFinalizersMutex.RLock() + defer fake.getFinalizersMutex.RUnlock() + return len(fake.getFinalizersArgsForCall) +} + +func (fake *Instance) GetFinalizersCalls(stub func() []string) { + fake.getFinalizersMutex.Lock() + defer fake.getFinalizersMutex.Unlock() + fake.GetFinalizersStub = stub +} + +func (fake *Instance) GetFinalizersReturns(result1 []string) { + fake.getFinalizersMutex.Lock() + defer fake.getFinalizersMutex.Unlock() + fake.GetFinalizersStub = nil + fake.getFinalizersReturns = struct { + result1 []string + }{result1} +} + +func (fake *Instance) GetFinalizersReturnsOnCall(i int, result1 []string) { + fake.getFinalizersMutex.Lock() + defer fake.getFinalizersMutex.Unlock() + fake.GetFinalizersStub = nil + if fake.getFinalizersReturnsOnCall == nil { + fake.getFinalizersReturnsOnCall = make(map[int]struct { + result1 []string + }) + } + fake.getFinalizersReturnsOnCall[i] = struct { + result1 []string + }{result1} +} + +func (fake *Instance) GetGenerateName() string { + fake.getGenerateNameMutex.Lock() + ret, specificReturn := fake.getGenerateNameReturnsOnCall[len(fake.getGenerateNameArgsForCall)] + fake.getGenerateNameArgsForCall = append(fake.getGenerateNameArgsForCall, struct { + }{}) + stub := fake.GetGenerateNameStub + fakeReturns := fake.getGenerateNameReturns + fake.recordInvocation("GetGenerateName", []interface{}{}) + fake.getGenerateNameMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetGenerateNameCallCount() int { + fake.getGenerateNameMutex.RLock() + defer fake.getGenerateNameMutex.RUnlock() + return len(fake.getGenerateNameArgsForCall) +} + +func (fake *Instance) GetGenerateNameCalls(stub func() string) { + fake.getGenerateNameMutex.Lock() + defer fake.getGenerateNameMutex.Unlock() + fake.GetGenerateNameStub = stub +} + +func (fake *Instance) GetGenerateNameReturns(result1 string) { + fake.getGenerateNameMutex.Lock() + defer fake.getGenerateNameMutex.Unlock() + fake.GetGenerateNameStub = nil + fake.getGenerateNameReturns = struct { + result1 string + }{result1} +} + +func (fake *Instance) GetGenerateNameReturnsOnCall(i int, result1 string) { + fake.getGenerateNameMutex.Lock() + defer fake.getGenerateNameMutex.Unlock() + fake.GetGenerateNameStub = nil + if fake.getGenerateNameReturnsOnCall == nil { + fake.getGenerateNameReturnsOnCall = make(map[int]struct { + result1 string + }) + } + 
fake.getGenerateNameReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *Instance) GetGeneration() int64 { + fake.getGenerationMutex.Lock() + ret, specificReturn := fake.getGenerationReturnsOnCall[len(fake.getGenerationArgsForCall)] + fake.getGenerationArgsForCall = append(fake.getGenerationArgsForCall, struct { + }{}) + stub := fake.GetGenerationStub + fakeReturns := fake.getGenerationReturns + fake.recordInvocation("GetGeneration", []interface{}{}) + fake.getGenerationMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetGenerationCallCount() int { + fake.getGenerationMutex.RLock() + defer fake.getGenerationMutex.RUnlock() + return len(fake.getGenerationArgsForCall) +} + +func (fake *Instance) GetGenerationCalls(stub func() int64) { + fake.getGenerationMutex.Lock() + defer fake.getGenerationMutex.Unlock() + fake.GetGenerationStub = stub +} + +func (fake *Instance) GetGenerationReturns(result1 int64) { + fake.getGenerationMutex.Lock() + defer fake.getGenerationMutex.Unlock() + fake.GetGenerationStub = nil + fake.getGenerationReturns = struct { + result1 int64 + }{result1} +} + +func (fake *Instance) GetGenerationReturnsOnCall(i int, result1 int64) { + fake.getGenerationMutex.Lock() + defer fake.getGenerationMutex.Unlock() + fake.GetGenerationStub = nil + if fake.getGenerationReturnsOnCall == nil { + fake.getGenerationReturnsOnCall = make(map[int]struct { + result1 int64 + }) + } + fake.getGenerationReturnsOnCall[i] = struct { + result1 int64 + }{result1} +} + +func (fake *Instance) GetLabels() map[string]string { + fake.getLabelsMutex.Lock() + ret, specificReturn := fake.getLabelsReturnsOnCall[len(fake.getLabelsArgsForCall)] + fake.getLabelsArgsForCall = append(fake.getLabelsArgsForCall, struct { + }{}) + stub := fake.GetLabelsStub + fakeReturns := fake.getLabelsReturns + fake.recordInvocation("GetLabels", []interface{}{}) + fake.getLabelsMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetLabelsCallCount() int { + fake.getLabelsMutex.RLock() + defer fake.getLabelsMutex.RUnlock() + return len(fake.getLabelsArgsForCall) +} + +func (fake *Instance) GetLabelsCalls(stub func() map[string]string) { + fake.getLabelsMutex.Lock() + defer fake.getLabelsMutex.Unlock() + fake.GetLabelsStub = stub +} + +func (fake *Instance) GetLabelsReturns(result1 map[string]string) { + fake.getLabelsMutex.Lock() + defer fake.getLabelsMutex.Unlock() + fake.GetLabelsStub = nil + fake.getLabelsReturns = struct { + result1 map[string]string + }{result1} +} + +func (fake *Instance) GetLabelsReturnsOnCall(i int, result1 map[string]string) { + fake.getLabelsMutex.Lock() + defer fake.getLabelsMutex.Unlock() + fake.GetLabelsStub = nil + if fake.getLabelsReturnsOnCall == nil { + fake.getLabelsReturnsOnCall = make(map[int]struct { + result1 map[string]string + }) + } + fake.getLabelsReturnsOnCall[i] = struct { + result1 map[string]string + }{result1} +} + +func (fake *Instance) GetManagedFields() []v1.ManagedFieldsEntry { + fake.getManagedFieldsMutex.Lock() + ret, specificReturn := fake.getManagedFieldsReturnsOnCall[len(fake.getManagedFieldsArgsForCall)] + fake.getManagedFieldsArgsForCall = append(fake.getManagedFieldsArgsForCall, struct { + }{}) + stub := fake.GetManagedFieldsStub + fakeReturns := fake.getManagedFieldsReturns + fake.recordInvocation("GetManagedFields", []interface{}{}) + 
fake.getManagedFieldsMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetManagedFieldsCallCount() int { + fake.getManagedFieldsMutex.RLock() + defer fake.getManagedFieldsMutex.RUnlock() + return len(fake.getManagedFieldsArgsForCall) +} + +func (fake *Instance) GetManagedFieldsCalls(stub func() []v1.ManagedFieldsEntry) { + fake.getManagedFieldsMutex.Lock() + defer fake.getManagedFieldsMutex.Unlock() + fake.GetManagedFieldsStub = stub +} + +func (fake *Instance) GetManagedFieldsReturns(result1 []v1.ManagedFieldsEntry) { + fake.getManagedFieldsMutex.Lock() + defer fake.getManagedFieldsMutex.Unlock() + fake.GetManagedFieldsStub = nil + fake.getManagedFieldsReturns = struct { + result1 []v1.ManagedFieldsEntry + }{result1} +} + +func (fake *Instance) GetManagedFieldsReturnsOnCall(i int, result1 []v1.ManagedFieldsEntry) { + fake.getManagedFieldsMutex.Lock() + defer fake.getManagedFieldsMutex.Unlock() + fake.GetManagedFieldsStub = nil + if fake.getManagedFieldsReturnsOnCall == nil { + fake.getManagedFieldsReturnsOnCall = make(map[int]struct { + result1 []v1.ManagedFieldsEntry + }) + } + fake.getManagedFieldsReturnsOnCall[i] = struct { + result1 []v1.ManagedFieldsEntry + }{result1} +} + +func (fake *Instance) GetName() string { + fake.getNameMutex.Lock() + ret, specificReturn := fake.getNameReturnsOnCall[len(fake.getNameArgsForCall)] + fake.getNameArgsForCall = append(fake.getNameArgsForCall, struct { + }{}) + stub := fake.GetNameStub + fakeReturns := fake.getNameReturns + fake.recordInvocation("GetName", []interface{}{}) + fake.getNameMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetNameCallCount() int { + fake.getNameMutex.RLock() + defer fake.getNameMutex.RUnlock() + return len(fake.getNameArgsForCall) +} + +func (fake *Instance) GetNameCalls(stub func() string) { + fake.getNameMutex.Lock() + defer fake.getNameMutex.Unlock() + fake.GetNameStub = stub +} + +func (fake *Instance) GetNameReturns(result1 string) { + fake.getNameMutex.Lock() + defer fake.getNameMutex.Unlock() + fake.GetNameStub = nil + fake.getNameReturns = struct { + result1 string + }{result1} +} + +func (fake *Instance) GetNameReturnsOnCall(i int, result1 string) { + fake.getNameMutex.Lock() + defer fake.getNameMutex.Unlock() + fake.GetNameStub = nil + if fake.getNameReturnsOnCall == nil { + fake.getNameReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getNameReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *Instance) GetNamespace() string { + fake.getNamespaceMutex.Lock() + ret, specificReturn := fake.getNamespaceReturnsOnCall[len(fake.getNamespaceArgsForCall)] + fake.getNamespaceArgsForCall = append(fake.getNamespaceArgsForCall, struct { + }{}) + stub := fake.GetNamespaceStub + fakeReturns := fake.getNamespaceReturns + fake.recordInvocation("GetNamespace", []interface{}{}) + fake.getNamespaceMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetNamespaceCallCount() int { + fake.getNamespaceMutex.RLock() + defer fake.getNamespaceMutex.RUnlock() + return len(fake.getNamespaceArgsForCall) +} + +func (fake *Instance) GetNamespaceCalls(stub func() string) { + fake.getNamespaceMutex.Lock() + defer fake.getNamespaceMutex.Unlock() + fake.GetNamespaceStub = stub +} + 
+func (fake *Instance) GetNamespaceReturns(result1 string) { + fake.getNamespaceMutex.Lock() + defer fake.getNamespaceMutex.Unlock() + fake.GetNamespaceStub = nil + fake.getNamespaceReturns = struct { + result1 string + }{result1} +} + +func (fake *Instance) GetNamespaceReturnsOnCall(i int, result1 string) { + fake.getNamespaceMutex.Lock() + defer fake.getNamespaceMutex.Unlock() + fake.GetNamespaceStub = nil + if fake.getNamespaceReturnsOnCall == nil { + fake.getNamespaceReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getNamespaceReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *Instance) GetOwnerReferences() []v1.OwnerReference { + fake.getOwnerReferencesMutex.Lock() + ret, specificReturn := fake.getOwnerReferencesReturnsOnCall[len(fake.getOwnerReferencesArgsForCall)] + fake.getOwnerReferencesArgsForCall = append(fake.getOwnerReferencesArgsForCall, struct { + }{}) + stub := fake.GetOwnerReferencesStub + fakeReturns := fake.getOwnerReferencesReturns + fake.recordInvocation("GetOwnerReferences", []interface{}{}) + fake.getOwnerReferencesMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetOwnerReferencesCallCount() int { + fake.getOwnerReferencesMutex.RLock() + defer fake.getOwnerReferencesMutex.RUnlock() + return len(fake.getOwnerReferencesArgsForCall) +} + +func (fake *Instance) GetOwnerReferencesCalls(stub func() []v1.OwnerReference) { + fake.getOwnerReferencesMutex.Lock() + defer fake.getOwnerReferencesMutex.Unlock() + fake.GetOwnerReferencesStub = stub +} + +func (fake *Instance) GetOwnerReferencesReturns(result1 []v1.OwnerReference) { + fake.getOwnerReferencesMutex.Lock() + defer fake.getOwnerReferencesMutex.Unlock() + fake.GetOwnerReferencesStub = nil + fake.getOwnerReferencesReturns = struct { + result1 []v1.OwnerReference + }{result1} +} + +func (fake *Instance) GetOwnerReferencesReturnsOnCall(i int, result1 []v1.OwnerReference) { + fake.getOwnerReferencesMutex.Lock() + defer fake.getOwnerReferencesMutex.Unlock() + fake.GetOwnerReferencesStub = nil + if fake.getOwnerReferencesReturnsOnCall == nil { + fake.getOwnerReferencesReturnsOnCall = make(map[int]struct { + result1 []v1.OwnerReference + }) + } + fake.getOwnerReferencesReturnsOnCall[i] = struct { + result1 []v1.OwnerReference + }{result1} +} + +func (fake *Instance) GetPullSecrets() []v1a.LocalObjectReference { + fake.getPullSecretsMutex.Lock() + ret, specificReturn := fake.getPullSecretsReturnsOnCall[len(fake.getPullSecretsArgsForCall)] + fake.getPullSecretsArgsForCall = append(fake.getPullSecretsArgsForCall, struct { + }{}) + stub := fake.GetPullSecretsStub + fakeReturns := fake.getPullSecretsReturns + fake.recordInvocation("GetPullSecrets", []interface{}{}) + fake.getPullSecretsMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetPullSecretsCallCount() int { + fake.getPullSecretsMutex.RLock() + defer fake.getPullSecretsMutex.RUnlock() + return len(fake.getPullSecretsArgsForCall) +} + +func (fake *Instance) GetPullSecretsCalls(stub func() []v1a.LocalObjectReference) { + fake.getPullSecretsMutex.Lock() + defer fake.getPullSecretsMutex.Unlock() + fake.GetPullSecretsStub = stub +} + +func (fake *Instance) GetPullSecretsReturns(result1 []v1a.LocalObjectReference) { + fake.getPullSecretsMutex.Lock() + defer fake.getPullSecretsMutex.Unlock() + fake.GetPullSecretsStub = 
nil + fake.getPullSecretsReturns = struct { + result1 []v1a.LocalObjectReference + }{result1} +} + +func (fake *Instance) GetPullSecretsReturnsOnCall(i int, result1 []v1a.LocalObjectReference) { + fake.getPullSecretsMutex.Lock() + defer fake.getPullSecretsMutex.Unlock() + fake.GetPullSecretsStub = nil + if fake.getPullSecretsReturnsOnCall == nil { + fake.getPullSecretsReturnsOnCall = make(map[int]struct { + result1 []v1a.LocalObjectReference + }) + } + fake.getPullSecretsReturnsOnCall[i] = struct { + result1 []v1a.LocalObjectReference + }{result1} +} + +func (fake *Instance) GetResource(arg1 v1beta1.Component) v1a.ResourceRequirements { + fake.getResourceMutex.Lock() + ret, specificReturn := fake.getResourceReturnsOnCall[len(fake.getResourceArgsForCall)] + fake.getResourceArgsForCall = append(fake.getResourceArgsForCall, struct { + arg1 v1beta1.Component + }{arg1}) + stub := fake.GetResourceStub + fakeReturns := fake.getResourceReturns + fake.recordInvocation("GetResource", []interface{}{arg1}) + fake.getResourceMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetResourceCallCount() int { + fake.getResourceMutex.RLock() + defer fake.getResourceMutex.RUnlock() + return len(fake.getResourceArgsForCall) +} + +func (fake *Instance) GetResourceCalls(stub func(v1beta1.Component) v1a.ResourceRequirements) { + fake.getResourceMutex.Lock() + defer fake.getResourceMutex.Unlock() + fake.GetResourceStub = stub +} + +func (fake *Instance) GetResourceArgsForCall(i int) v1beta1.Component { + fake.getResourceMutex.RLock() + defer fake.getResourceMutex.RUnlock() + argsForCall := fake.getResourceArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Instance) GetResourceReturns(result1 v1a.ResourceRequirements) { + fake.getResourceMutex.Lock() + defer fake.getResourceMutex.Unlock() + fake.GetResourceStub = nil + fake.getResourceReturns = struct { + result1 v1a.ResourceRequirements + }{result1} +} + +func (fake *Instance) GetResourceReturnsOnCall(i int, result1 v1a.ResourceRequirements) { + fake.getResourceMutex.Lock() + defer fake.getResourceMutex.Unlock() + fake.GetResourceStub = nil + if fake.getResourceReturnsOnCall == nil { + fake.getResourceReturnsOnCall = make(map[int]struct { + result1 v1a.ResourceRequirements + }) + } + fake.getResourceReturnsOnCall[i] = struct { + result1 v1a.ResourceRequirements + }{result1} +} + +func (fake *Instance) GetResourceVersion() string { + fake.getResourceVersionMutex.Lock() + ret, specificReturn := fake.getResourceVersionReturnsOnCall[len(fake.getResourceVersionArgsForCall)] + fake.getResourceVersionArgsForCall = append(fake.getResourceVersionArgsForCall, struct { + }{}) + stub := fake.GetResourceVersionStub + fakeReturns := fake.getResourceVersionReturns + fake.recordInvocation("GetResourceVersion", []interface{}{}) + fake.getResourceVersionMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetResourceVersionCallCount() int { + fake.getResourceVersionMutex.RLock() + defer fake.getResourceVersionMutex.RUnlock() + return len(fake.getResourceVersionArgsForCall) +} + +func (fake *Instance) GetResourceVersionCalls(stub func() string) { + fake.getResourceVersionMutex.Lock() + defer fake.getResourceVersionMutex.Unlock() + fake.GetResourceVersionStub = stub +} + +func (fake *Instance) GetResourceVersionReturns(result1 string) { + 
fake.getResourceVersionMutex.Lock() + defer fake.getResourceVersionMutex.Unlock() + fake.GetResourceVersionStub = nil + fake.getResourceVersionReturns = struct { + result1 string + }{result1} +} + +func (fake *Instance) GetResourceVersionReturnsOnCall(i int, result1 string) { + fake.getResourceVersionMutex.Lock() + defer fake.getResourceVersionMutex.Unlock() + fake.GetResourceVersionStub = nil + if fake.getResourceVersionReturnsOnCall == nil { + fake.getResourceVersionReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getResourceVersionReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *Instance) GetSelfLink() string { + fake.getSelfLinkMutex.Lock() + ret, specificReturn := fake.getSelfLinkReturnsOnCall[len(fake.getSelfLinkArgsForCall)] + fake.getSelfLinkArgsForCall = append(fake.getSelfLinkArgsForCall, struct { + }{}) + stub := fake.GetSelfLinkStub + fakeReturns := fake.getSelfLinkReturns + fake.recordInvocation("GetSelfLink", []interface{}{}) + fake.getSelfLinkMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetSelfLinkCallCount() int { + fake.getSelfLinkMutex.RLock() + defer fake.getSelfLinkMutex.RUnlock() + return len(fake.getSelfLinkArgsForCall) +} + +func (fake *Instance) GetSelfLinkCalls(stub func() string) { + fake.getSelfLinkMutex.Lock() + defer fake.getSelfLinkMutex.Unlock() + fake.GetSelfLinkStub = stub +} + +func (fake *Instance) GetSelfLinkReturns(result1 string) { + fake.getSelfLinkMutex.Lock() + defer fake.getSelfLinkMutex.Unlock() + fake.GetSelfLinkStub = nil + fake.getSelfLinkReturns = struct { + result1 string + }{result1} +} + +func (fake *Instance) GetSelfLinkReturnsOnCall(i int, result1 string) { + fake.getSelfLinkMutex.Lock() + defer fake.getSelfLinkMutex.Unlock() + fake.GetSelfLinkStub = nil + if fake.getSelfLinkReturnsOnCall == nil { + fake.getSelfLinkReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getSelfLinkReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *Instance) GetUID() types.UID { + fake.getUIDMutex.Lock() + ret, specificReturn := fake.getUIDReturnsOnCall[len(fake.getUIDArgsForCall)] + fake.getUIDArgsForCall = append(fake.getUIDArgsForCall, struct { + }{}) + stub := fake.GetUIDStub + fakeReturns := fake.getUIDReturns + fake.recordInvocation("GetUID", []interface{}{}) + fake.getUIDMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetUIDCallCount() int { + fake.getUIDMutex.RLock() + defer fake.getUIDMutex.RUnlock() + return len(fake.getUIDArgsForCall) +} + +func (fake *Instance) GetUIDCalls(stub func() types.UID) { + fake.getUIDMutex.Lock() + defer fake.getUIDMutex.Unlock() + fake.GetUIDStub = stub +} + +func (fake *Instance) GetUIDReturns(result1 types.UID) { + fake.getUIDMutex.Lock() + defer fake.getUIDMutex.Unlock() + fake.GetUIDStub = nil + fake.getUIDReturns = struct { + result1 types.UID + }{result1} +} + +func (fake *Instance) GetUIDReturnsOnCall(i int, result1 types.UID) { + fake.getUIDMutex.Lock() + defer fake.getUIDMutex.Unlock() + fake.GetUIDStub = nil + if fake.getUIDReturnsOnCall == nil { + fake.getUIDReturnsOnCall = make(map[int]struct { + result1 types.UID + }) + } + fake.getUIDReturnsOnCall[i] = struct { + result1 types.UID + }{result1} +} + +func (fake *Instance) PVCName() string { + fake.pVCNameMutex.Lock() + ret, specificReturn := 
fake.pVCNameReturnsOnCall[len(fake.pVCNameArgsForCall)] + fake.pVCNameArgsForCall = append(fake.pVCNameArgsForCall, struct { + }{}) + stub := fake.PVCNameStub + fakeReturns := fake.pVCNameReturns + fake.recordInvocation("PVCName", []interface{}{}) + fake.pVCNameMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) PVCNameCallCount() int { + fake.pVCNameMutex.RLock() + defer fake.pVCNameMutex.RUnlock() + return len(fake.pVCNameArgsForCall) +} + +func (fake *Instance) PVCNameCalls(stub func() string) { + fake.pVCNameMutex.Lock() + defer fake.pVCNameMutex.Unlock() + fake.PVCNameStub = stub +} + +func (fake *Instance) PVCNameReturns(result1 string) { + fake.pVCNameMutex.Lock() + defer fake.pVCNameMutex.Unlock() + fake.PVCNameStub = nil + fake.pVCNameReturns = struct { + result1 string + }{result1} +} + +func (fake *Instance) PVCNameReturnsOnCall(i int, result1 string) { + fake.pVCNameMutex.Lock() + defer fake.pVCNameMutex.Unlock() + fake.PVCNameStub = nil + if fake.pVCNameReturnsOnCall == nil { + fake.pVCNameReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.pVCNameReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *Instance) SetAnnotations(arg1 map[string]string) { + fake.setAnnotationsMutex.Lock() + fake.setAnnotationsArgsForCall = append(fake.setAnnotationsArgsForCall, struct { + arg1 map[string]string + }{arg1}) + stub := fake.SetAnnotationsStub + fake.recordInvocation("SetAnnotations", []interface{}{arg1}) + fake.setAnnotationsMutex.Unlock() + if stub != nil { + fake.SetAnnotationsStub(arg1) + } +} + +func (fake *Instance) SetAnnotationsCallCount() int { + fake.setAnnotationsMutex.RLock() + defer fake.setAnnotationsMutex.RUnlock() + return len(fake.setAnnotationsArgsForCall) +} + +func (fake *Instance) SetAnnotationsCalls(stub func(map[string]string)) { + fake.setAnnotationsMutex.Lock() + defer fake.setAnnotationsMutex.Unlock() + fake.SetAnnotationsStub = stub +} + +func (fake *Instance) SetAnnotationsArgsForCall(i int) map[string]string { + fake.setAnnotationsMutex.RLock() + defer fake.setAnnotationsMutex.RUnlock() + argsForCall := fake.setAnnotationsArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Instance) SetClusterName(arg1 string) { + fake.setClusterNameMutex.Lock() + fake.setClusterNameArgsForCall = append(fake.setClusterNameArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetClusterNameStub + fake.recordInvocation("SetClusterName", []interface{}{arg1}) + fake.setClusterNameMutex.Unlock() + if stub != nil { + fake.SetClusterNameStub(arg1) + } +} + +func (fake *Instance) SetClusterNameCallCount() int { + fake.setClusterNameMutex.RLock() + defer fake.setClusterNameMutex.RUnlock() + return len(fake.setClusterNameArgsForCall) +} + +func (fake *Instance) SetClusterNameCalls(stub func(string)) { + fake.setClusterNameMutex.Lock() + defer fake.setClusterNameMutex.Unlock() + fake.SetClusterNameStub = stub +} + +func (fake *Instance) SetClusterNameArgsForCall(i int) string { + fake.setClusterNameMutex.RLock() + defer fake.setClusterNameMutex.RUnlock() + argsForCall := fake.setClusterNameArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Instance) SetCreationTimestamp(arg1 v1.Time) { + fake.setCreationTimestampMutex.Lock() + fake.setCreationTimestampArgsForCall = append(fake.setCreationTimestampArgsForCall, struct { + arg1 v1.Time + }{arg1}) + stub := fake.SetCreationTimestampStub + 
fake.recordInvocation("SetCreationTimestamp", []interface{}{arg1}) + fake.setCreationTimestampMutex.Unlock() + if stub != nil { + fake.SetCreationTimestampStub(arg1) + } +} + +func (fake *Instance) SetCreationTimestampCallCount() int { + fake.setCreationTimestampMutex.RLock() + defer fake.setCreationTimestampMutex.RUnlock() + return len(fake.setCreationTimestampArgsForCall) +} + +func (fake *Instance) SetCreationTimestampCalls(stub func(v1.Time)) { + fake.setCreationTimestampMutex.Lock() + defer fake.setCreationTimestampMutex.Unlock() + fake.SetCreationTimestampStub = stub +} + +func (fake *Instance) SetCreationTimestampArgsForCall(i int) v1.Time { + fake.setCreationTimestampMutex.RLock() + defer fake.setCreationTimestampMutex.RUnlock() + argsForCall := fake.setCreationTimestampArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Instance) SetDeletionGracePeriodSeconds(arg1 *int64) { + fake.setDeletionGracePeriodSecondsMutex.Lock() + fake.setDeletionGracePeriodSecondsArgsForCall = append(fake.setDeletionGracePeriodSecondsArgsForCall, struct { + arg1 *int64 + }{arg1}) + stub := fake.SetDeletionGracePeriodSecondsStub + fake.recordInvocation("SetDeletionGracePeriodSeconds", []interface{}{arg1}) + fake.setDeletionGracePeriodSecondsMutex.Unlock() + if stub != nil { + fake.SetDeletionGracePeriodSecondsStub(arg1) + } +} + +func (fake *Instance) SetDeletionGracePeriodSecondsCallCount() int { + fake.setDeletionGracePeriodSecondsMutex.RLock() + defer fake.setDeletionGracePeriodSecondsMutex.RUnlock() + return len(fake.setDeletionGracePeriodSecondsArgsForCall) +} + +func (fake *Instance) SetDeletionGracePeriodSecondsCalls(stub func(*int64)) { + fake.setDeletionGracePeriodSecondsMutex.Lock() + defer fake.setDeletionGracePeriodSecondsMutex.Unlock() + fake.SetDeletionGracePeriodSecondsStub = stub +} + +func (fake *Instance) SetDeletionGracePeriodSecondsArgsForCall(i int) *int64 { + fake.setDeletionGracePeriodSecondsMutex.RLock() + defer fake.setDeletionGracePeriodSecondsMutex.RUnlock() + argsForCall := fake.setDeletionGracePeriodSecondsArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Instance) SetDeletionTimestamp(arg1 *v1.Time) { + fake.setDeletionTimestampMutex.Lock() + fake.setDeletionTimestampArgsForCall = append(fake.setDeletionTimestampArgsForCall, struct { + arg1 *v1.Time + }{arg1}) + stub := fake.SetDeletionTimestampStub + fake.recordInvocation("SetDeletionTimestamp", []interface{}{arg1}) + fake.setDeletionTimestampMutex.Unlock() + if stub != nil { + fake.SetDeletionTimestampStub(arg1) + } +} + +func (fake *Instance) SetDeletionTimestampCallCount() int { + fake.setDeletionTimestampMutex.RLock() + defer fake.setDeletionTimestampMutex.RUnlock() + return len(fake.setDeletionTimestampArgsForCall) +} + +func (fake *Instance) SetDeletionTimestampCalls(stub func(*v1.Time)) { + fake.setDeletionTimestampMutex.Lock() + defer fake.setDeletionTimestampMutex.Unlock() + fake.SetDeletionTimestampStub = stub +} + +func (fake *Instance) SetDeletionTimestampArgsForCall(i int) *v1.Time { + fake.setDeletionTimestampMutex.RLock() + defer fake.setDeletionTimestampMutex.RUnlock() + argsForCall := fake.setDeletionTimestampArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Instance) SetFinalizers(arg1 []string) { + var arg1Copy []string + if arg1 != nil { + arg1Copy = make([]string, len(arg1)) + copy(arg1Copy, arg1) + } + fake.setFinalizersMutex.Lock() + fake.setFinalizersArgsForCall = append(fake.setFinalizersArgsForCall, struct { + arg1 []string + }{arg1Copy}) + stub := fake.SetFinalizersStub + 
fake.recordInvocation("SetFinalizers", []interface{}{arg1Copy}) + fake.setFinalizersMutex.Unlock() + if stub != nil { + fake.SetFinalizersStub(arg1) + } +} + +func (fake *Instance) SetFinalizersCallCount() int { + fake.setFinalizersMutex.RLock() + defer fake.setFinalizersMutex.RUnlock() + return len(fake.setFinalizersArgsForCall) +} + +func (fake *Instance) SetFinalizersCalls(stub func([]string)) { + fake.setFinalizersMutex.Lock() + defer fake.setFinalizersMutex.Unlock() + fake.SetFinalizersStub = stub +} + +func (fake *Instance) SetFinalizersArgsForCall(i int) []string { + fake.setFinalizersMutex.RLock() + defer fake.setFinalizersMutex.RUnlock() + argsForCall := fake.setFinalizersArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Instance) SetGenerateName(arg1 string) { + fake.setGenerateNameMutex.Lock() + fake.setGenerateNameArgsForCall = append(fake.setGenerateNameArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetGenerateNameStub + fake.recordInvocation("SetGenerateName", []interface{}{arg1}) + fake.setGenerateNameMutex.Unlock() + if stub != nil { + fake.SetGenerateNameStub(arg1) + } +} + +func (fake *Instance) SetGenerateNameCallCount() int { + fake.setGenerateNameMutex.RLock() + defer fake.setGenerateNameMutex.RUnlock() + return len(fake.setGenerateNameArgsForCall) +} + +func (fake *Instance) SetGenerateNameCalls(stub func(string)) { + fake.setGenerateNameMutex.Lock() + defer fake.setGenerateNameMutex.Unlock() + fake.SetGenerateNameStub = stub +} + +func (fake *Instance) SetGenerateNameArgsForCall(i int) string { + fake.setGenerateNameMutex.RLock() + defer fake.setGenerateNameMutex.RUnlock() + argsForCall := fake.setGenerateNameArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Instance) SetGeneration(arg1 int64) { + fake.setGenerationMutex.Lock() + fake.setGenerationArgsForCall = append(fake.setGenerationArgsForCall, struct { + arg1 int64 + }{arg1}) + stub := fake.SetGenerationStub + fake.recordInvocation("SetGeneration", []interface{}{arg1}) + fake.setGenerationMutex.Unlock() + if stub != nil { + fake.SetGenerationStub(arg1) + } +} + +func (fake *Instance) SetGenerationCallCount() int { + fake.setGenerationMutex.RLock() + defer fake.setGenerationMutex.RUnlock() + return len(fake.setGenerationArgsForCall) +} + +func (fake *Instance) SetGenerationCalls(stub func(int64)) { + fake.setGenerationMutex.Lock() + defer fake.setGenerationMutex.Unlock() + fake.SetGenerationStub = stub +} + +func (fake *Instance) SetGenerationArgsForCall(i int) int64 { + fake.setGenerationMutex.RLock() + defer fake.setGenerationMutex.RUnlock() + argsForCall := fake.setGenerationArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Instance) SetLabels(arg1 map[string]string) { + fake.setLabelsMutex.Lock() + fake.setLabelsArgsForCall = append(fake.setLabelsArgsForCall, struct { + arg1 map[string]string + }{arg1}) + stub := fake.SetLabelsStub + fake.recordInvocation("SetLabels", []interface{}{arg1}) + fake.setLabelsMutex.Unlock() + if stub != nil { + fake.SetLabelsStub(arg1) + } +} + +func (fake *Instance) SetLabelsCallCount() int { + fake.setLabelsMutex.RLock() + defer fake.setLabelsMutex.RUnlock() + return len(fake.setLabelsArgsForCall) +} + +func (fake *Instance) SetLabelsCalls(stub func(map[string]string)) { + fake.setLabelsMutex.Lock() + defer fake.setLabelsMutex.Unlock() + fake.SetLabelsStub = stub +} + +func (fake *Instance) SetLabelsArgsForCall(i int) map[string]string { + fake.setLabelsMutex.RLock() + defer fake.setLabelsMutex.RUnlock() + argsForCall := 
fake.setLabelsArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Instance) SetManagedFields(arg1 []v1.ManagedFieldsEntry) { + var arg1Copy []v1.ManagedFieldsEntry + if arg1 != nil { + arg1Copy = make([]v1.ManagedFieldsEntry, len(arg1)) + copy(arg1Copy, arg1) + } + fake.setManagedFieldsMutex.Lock() + fake.setManagedFieldsArgsForCall = append(fake.setManagedFieldsArgsForCall, struct { + arg1 []v1.ManagedFieldsEntry + }{arg1Copy}) + stub := fake.SetManagedFieldsStub + fake.recordInvocation("SetManagedFields", []interface{}{arg1Copy}) + fake.setManagedFieldsMutex.Unlock() + if stub != nil { + fake.SetManagedFieldsStub(arg1) + } +} + +func (fake *Instance) SetManagedFieldsCallCount() int { + fake.setManagedFieldsMutex.RLock() + defer fake.setManagedFieldsMutex.RUnlock() + return len(fake.setManagedFieldsArgsForCall) +} + +func (fake *Instance) SetManagedFieldsCalls(stub func([]v1.ManagedFieldsEntry)) { + fake.setManagedFieldsMutex.Lock() + defer fake.setManagedFieldsMutex.Unlock() + fake.SetManagedFieldsStub = stub +} + +func (fake *Instance) SetManagedFieldsArgsForCall(i int) []v1.ManagedFieldsEntry { + fake.setManagedFieldsMutex.RLock() + defer fake.setManagedFieldsMutex.RUnlock() + argsForCall := fake.setManagedFieldsArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Instance) SetName(arg1 string) { + fake.setNameMutex.Lock() + fake.setNameArgsForCall = append(fake.setNameArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetNameStub + fake.recordInvocation("SetName", []interface{}{arg1}) + fake.setNameMutex.Unlock() + if stub != nil { + fake.SetNameStub(arg1) + } +} + +func (fake *Instance) SetNameCallCount() int { + fake.setNameMutex.RLock() + defer fake.setNameMutex.RUnlock() + return len(fake.setNameArgsForCall) +} + +func (fake *Instance) SetNameCalls(stub func(string)) { + fake.setNameMutex.Lock() + defer fake.setNameMutex.Unlock() + fake.SetNameStub = stub +} + +func (fake *Instance) SetNameArgsForCall(i int) string { + fake.setNameMutex.RLock() + defer fake.setNameMutex.RUnlock() + argsForCall := fake.setNameArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Instance) SetNamespace(arg1 string) { + fake.setNamespaceMutex.Lock() + fake.setNamespaceArgsForCall = append(fake.setNamespaceArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetNamespaceStub + fake.recordInvocation("SetNamespace", []interface{}{arg1}) + fake.setNamespaceMutex.Unlock() + if stub != nil { + fake.SetNamespaceStub(arg1) + } +} + +func (fake *Instance) SetNamespaceCallCount() int { + fake.setNamespaceMutex.RLock() + defer fake.setNamespaceMutex.RUnlock() + return len(fake.setNamespaceArgsForCall) +} + +func (fake *Instance) SetNamespaceCalls(stub func(string)) { + fake.setNamespaceMutex.Lock() + defer fake.setNamespaceMutex.Unlock() + fake.SetNamespaceStub = stub +} + +func (fake *Instance) SetNamespaceArgsForCall(i int) string { + fake.setNamespaceMutex.RLock() + defer fake.setNamespaceMutex.RUnlock() + argsForCall := fake.setNamespaceArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Instance) SetOwnerReferences(arg1 []v1.OwnerReference) { + var arg1Copy []v1.OwnerReference + if arg1 != nil { + arg1Copy = make([]v1.OwnerReference, len(arg1)) + copy(arg1Copy, arg1) + } + fake.setOwnerReferencesMutex.Lock() + fake.setOwnerReferencesArgsForCall = append(fake.setOwnerReferencesArgsForCall, struct { + arg1 []v1.OwnerReference + }{arg1Copy}) + stub := fake.SetOwnerReferencesStub + fake.recordInvocation("SetOwnerReferences", []interface{}{arg1Copy}) + 
fake.setOwnerReferencesMutex.Unlock() + if stub != nil { + fake.SetOwnerReferencesStub(arg1) + } +} + +func (fake *Instance) SetOwnerReferencesCallCount() int { + fake.setOwnerReferencesMutex.RLock() + defer fake.setOwnerReferencesMutex.RUnlock() + return len(fake.setOwnerReferencesArgsForCall) +} + +func (fake *Instance) SetOwnerReferencesCalls(stub func([]v1.OwnerReference)) { + fake.setOwnerReferencesMutex.Lock() + defer fake.setOwnerReferencesMutex.Unlock() + fake.SetOwnerReferencesStub = stub +} + +func (fake *Instance) SetOwnerReferencesArgsForCall(i int) []v1.OwnerReference { + fake.setOwnerReferencesMutex.RLock() + defer fake.setOwnerReferencesMutex.RUnlock() + argsForCall := fake.setOwnerReferencesArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Instance) SetResourceVersion(arg1 string) { + fake.setResourceVersionMutex.Lock() + fake.setResourceVersionArgsForCall = append(fake.setResourceVersionArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetResourceVersionStub + fake.recordInvocation("SetResourceVersion", []interface{}{arg1}) + fake.setResourceVersionMutex.Unlock() + if stub != nil { + fake.SetResourceVersionStub(arg1) + } +} + +func (fake *Instance) SetResourceVersionCallCount() int { + fake.setResourceVersionMutex.RLock() + defer fake.setResourceVersionMutex.RUnlock() + return len(fake.setResourceVersionArgsForCall) +} + +func (fake *Instance) SetResourceVersionCalls(stub func(string)) { + fake.setResourceVersionMutex.Lock() + defer fake.setResourceVersionMutex.Unlock() + fake.SetResourceVersionStub = stub +} + +func (fake *Instance) SetResourceVersionArgsForCall(i int) string { + fake.setResourceVersionMutex.RLock() + defer fake.setResourceVersionMutex.RUnlock() + argsForCall := fake.setResourceVersionArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Instance) SetSelfLink(arg1 string) { + fake.setSelfLinkMutex.Lock() + fake.setSelfLinkArgsForCall = append(fake.setSelfLinkArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetSelfLinkStub + fake.recordInvocation("SetSelfLink", []interface{}{arg1}) + fake.setSelfLinkMutex.Unlock() + if stub != nil { + fake.SetSelfLinkStub(arg1) + } +} + +func (fake *Instance) SetSelfLinkCallCount() int { + fake.setSelfLinkMutex.RLock() + defer fake.setSelfLinkMutex.RUnlock() + return len(fake.setSelfLinkArgsForCall) +} + +func (fake *Instance) SetSelfLinkCalls(stub func(string)) { + fake.setSelfLinkMutex.Lock() + defer fake.setSelfLinkMutex.Unlock() + fake.SetSelfLinkStub = stub +} + +func (fake *Instance) SetSelfLinkArgsForCall(i int) string { + fake.setSelfLinkMutex.RLock() + defer fake.setSelfLinkMutex.RUnlock() + argsForCall := fake.setSelfLinkArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Instance) SetUID(arg1 types.UID) { + fake.setUIDMutex.Lock() + fake.setUIDArgsForCall = append(fake.setUIDArgsForCall, struct { + arg1 types.UID + }{arg1}) + stub := fake.SetUIDStub + fake.recordInvocation("SetUID", []interface{}{arg1}) + fake.setUIDMutex.Unlock() + if stub != nil { + fake.SetUIDStub(arg1) + } +} + +func (fake *Instance) SetUIDCallCount() int { + fake.setUIDMutex.RLock() + defer fake.setUIDMutex.RUnlock() + return len(fake.setUIDArgsForCall) +} + +func (fake *Instance) SetUIDCalls(stub func(types.UID)) { + fake.setUIDMutex.Lock() + defer fake.setUIDMutex.Unlock() + fake.SetUIDStub = stub +} + +func (fake *Instance) SetUIDArgsForCall(i int) types.UID { + fake.setUIDMutex.RLock() + defer fake.setUIDMutex.RUnlock() + argsForCall := fake.setUIDArgsForCall[i] + return argsForCall.arg1 +} 
+ +func (fake *Instance) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.enrollerImageMutex.RLock() + defer fake.enrollerImageMutex.RUnlock() + fake.getAnnotationsMutex.RLock() + defer fake.getAnnotationsMutex.RUnlock() + fake.getClusterNameMutex.RLock() + defer fake.getClusterNameMutex.RUnlock() + fake.getCreationTimestampMutex.RLock() + defer fake.getCreationTimestampMutex.RUnlock() + fake.getDeletionGracePeriodSecondsMutex.RLock() + defer fake.getDeletionGracePeriodSecondsMutex.RUnlock() + fake.getDeletionTimestampMutex.RLock() + defer fake.getDeletionTimestampMutex.RUnlock() + fake.getFinalizersMutex.RLock() + defer fake.getFinalizersMutex.RUnlock() + fake.getGenerateNameMutex.RLock() + defer fake.getGenerateNameMutex.RUnlock() + fake.getGenerationMutex.RLock() + defer fake.getGenerationMutex.RUnlock() + fake.getLabelsMutex.RLock() + defer fake.getLabelsMutex.RUnlock() + fake.getManagedFieldsMutex.RLock() + defer fake.getManagedFieldsMutex.RUnlock() + fake.getNameMutex.RLock() + defer fake.getNameMutex.RUnlock() + fake.getNamespaceMutex.RLock() + defer fake.getNamespaceMutex.RUnlock() + fake.getOwnerReferencesMutex.RLock() + defer fake.getOwnerReferencesMutex.RUnlock() + fake.getPullSecretsMutex.RLock() + defer fake.getPullSecretsMutex.RUnlock() + fake.getResourceMutex.RLock() + defer fake.getResourceMutex.RUnlock() + fake.getResourceVersionMutex.RLock() + defer fake.getResourceVersionMutex.RUnlock() + fake.getSelfLinkMutex.RLock() + defer fake.getSelfLinkMutex.RUnlock() + fake.getUIDMutex.RLock() + defer fake.getUIDMutex.RUnlock() + fake.pVCNameMutex.RLock() + defer fake.pVCNameMutex.RUnlock() + fake.setAnnotationsMutex.RLock() + defer fake.setAnnotationsMutex.RUnlock() + fake.setClusterNameMutex.RLock() + defer fake.setClusterNameMutex.RUnlock() + fake.setCreationTimestampMutex.RLock() + defer fake.setCreationTimestampMutex.RUnlock() + fake.setDeletionGracePeriodSecondsMutex.RLock() + defer fake.setDeletionGracePeriodSecondsMutex.RUnlock() + fake.setDeletionTimestampMutex.RLock() + defer fake.setDeletionTimestampMutex.RUnlock() + fake.setFinalizersMutex.RLock() + defer fake.setFinalizersMutex.RUnlock() + fake.setGenerateNameMutex.RLock() + defer fake.setGenerateNameMutex.RUnlock() + fake.setGenerationMutex.RLock() + defer fake.setGenerationMutex.RUnlock() + fake.setLabelsMutex.RLock() + defer fake.setLabelsMutex.RUnlock() + fake.setManagedFieldsMutex.RLock() + defer fake.setManagedFieldsMutex.RUnlock() + fake.setNameMutex.RLock() + defer fake.setNameMutex.RUnlock() + fake.setNamespaceMutex.RLock() + defer fake.setNamespaceMutex.RUnlock() + fake.setOwnerReferencesMutex.RLock() + defer fake.setOwnerReferencesMutex.RUnlock() + fake.setResourceVersionMutex.RLock() + defer fake.setResourceVersionMutex.RUnlock() + fake.setSelfLinkMutex.RLock() + defer fake.setSelfLinkMutex.RUnlock() + fake.setUIDMutex.RLock() + defer fake.setUIDMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *Instance) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ enroller.Instance = 
new(Instance) diff --git a/pkg/initializer/common/enroller/swenroller.go b/pkg/initializer/common/enroller/swenroller.go new file mode 100644 index 00000000..318f5fc8 --- /dev/null +++ b/pkg/initializer/common/enroller/swenroller.go @@ -0,0 +1,162 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package enroller + +import ( + "crypto/x509" + "encoding/pem" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "time" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + "github.com/hyperledger/fabric-ca/api" + "github.com/hyperledger/fabric-ca/lib" + "github.com/pkg/errors" +) + +//go:generate counterfeiter -o mocks/caclient.go -fake-name CAClient . CAClient + +type CAClient interface { + Init() error + Enroll(*api.EnrollmentRequest) (*lib.EnrollmentResponse, error) + GetEnrollmentRequest() *current.Enrollment + GetHomeDir() string + GetTLSCert() []byte + PingCA(time.Duration) error +} + +type SWEnroller struct { + Client CAClient +} + +func NewSWEnroller(caClient CAClient) *SWEnroller { + return &SWEnroller{ + Client: caClient, + } +} + +func (e *SWEnroller) GetEnrollmentRequest() *current.Enrollment { + return e.Client.GetEnrollmentRequest() +} + +func (e *SWEnroller) PingCA(timeout time.Duration) error { + return e.Client.PingCA(timeout) +} + +func (e *SWEnroller) Enroll() (*config.Response, error) { + resp, err := enroll(e.Client) + if err != nil { + return nil, err + } + + key, err := e.ReadKey() + if err != nil { + return nil, err + } + resp.Keystore = key + + return resp, nil +} + +func (e *SWEnroller) ReadKey() ([]byte, error) { + keystoreDir := filepath.Join(e.Client.GetHomeDir(), "msp", "keystore") + files, err := ioutil.ReadDir(keystoreDir) + if err != nil { + return nil, err + } + + if len(files) > 1 { + return nil, errors.Errorf("expecting only one key file to be present in keystore '%s', but found multiple", keystoreDir) + } + + for _, file := range files { + fileBytes, err := ioutil.ReadFile(filepath.Clean(filepath.Join(keystoreDir, file.Name()))) + if err != nil { + return nil, err + } + + block, _ := pem.Decode(fileBytes) + if block == nil { + continue + } + + _, err = x509.ParsePKCS8PrivateKey(block.Bytes) + if err == nil { + return fileBytes, nil + } + } + + return nil, errors.Errorf("failed to read private key") +} + +func enroll(client CAClient) (*config.Response, error) { + req := client.GetEnrollmentRequest() + log.Info(fmt.Sprintf("Enrolling with CA '%s'", req.CAHost)) + + err := os.MkdirAll(client.GetHomeDir(), 0750) + if err != nil { + return nil, err + } + + err = util.WriteFile(filepath.Join(client.GetHomeDir(), "tlsCert.pem"), client.GetTLSCert(), 0755) + if err != nil { + return nil, err + } + + err = client.Init() + if err != nil { + return nil, errors.Wrap(err, "failed to initialize CA 
client") + } + + // Enroll with CA + enrollReq := &api.EnrollmentRequest{ + Type: "x509", + Name: req.EnrollID, + Secret: req.EnrollSecret, + CAName: req.CAName, + } + if req.CSR != nil && len(req.CSR.Hosts) > 0 { + enrollReq.CSR = &api.CSRInfo{ + Hosts: req.CSR.Hosts, + } + } + + enrollResp, err := client.Enroll(enrollReq) + if err != nil { + return nil, errors.Wrap(err, "failed to enroll with CA") + } + + resp := &config.Response{} + resp, err = ParseEnrollmentResponse(resp, &enrollResp.CAInfo) + if err != nil { + return nil, err + } + + id := enrollResp.Identity + if id.GetECert() != nil { + resp.SignCert = id.GetECert().Cert() + } + + return resp, nil +} diff --git a/pkg/initializer/common/enroller/swenroller_test.go b/pkg/initializer/common/enroller/swenroller_test.go new file mode 100644 index 00000000..62c24e38 --- /dev/null +++ b/pkg/initializer/common/enroller/swenroller_test.go @@ -0,0 +1,70 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package enroller_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/hyperledger/fabric-ca/lib" + "github.com/hyperledger/fabric-ca/lib/client/credential" + "github.com/hyperledger/fabric-ca/lib/client/credential/x509" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/enroller" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/enroller/mocks" +) + +var _ = Describe("Software enroller", func() { + var ( + e *enroller.SWEnroller + caClient *mocks.CAClient + ) + + BeforeEach(func() { + caClient = &mocks.CAClient{} + caClient.GetHomeDirReturns("../../../../testdata") + + creds := []credential.Credential{ + x509.NewCredential("", "", nil), + } + caClient.EnrollReturns(&lib.EnrollmentResponse{ + Identity: lib.NewIdentity(nil, "", creds), + }, nil) + caClient.GetEnrollmentRequestReturns(¤t.Enrollment{ + CATLS: ¤t.CATLS{ + CACert: 
"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNGakNDQWIyZ0F3SUJBZ0lVZi84bk94M2NqM1htVzNDSUo1L0Q1ejRRcUVvd0NnWUlLb1pJemowRUF3SXcKYURFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJrd0Z3WURWUVFERXhCbVlXSnlhV010ClkyRXRjMlZ5ZG1WeU1CNFhEVEU1TVRBek1ERTNNamd3TUZvWERUTTBNVEF5TmpFM01qZ3dNRm93YURFTE1Ba0cKQTFVRUJoTUNWVk14RnpBVkJnTlZCQWdURGs1dmNuUm9JRU5oY205c2FXNWhNUlF3RWdZRFZRUUtFd3RJZVhCbApjbXhsWkdkbGNqRVBNQTBHQTFVRUN4TUdSbUZpY21sak1Sa3dGd1lEVlFRREV4Qm1ZV0p5YVdNdFkyRXRjMlZ5CmRtVnlNRmt3RXdZSEtvWkl6ajBDQVFZSUtvWkl6ajBEQVFjRFFnQUVSbzNmbUc2UHkyUHd6cUMwNnFWZDlFOFgKZ044eldqZzFMb3lnMmsxdkQ4MXY1dENRRytCTVozSUJGQnI2VTRhc0tZTUREakd6TElERmdUUTRjVDd1VktORgpNRU13RGdZRFZSMFBBUUgvQkFRREFnRUdNQklHQTFVZEV3RUIvd1FJTUFZQkFmOENBUUV3SFFZRFZSME9CQllFCkZFa0RtUHhjbTdGcXZSMXllN0tNNGdLLy9KZ1JNQW9HQ0NxR1NNNDlCQU1DQTBjQU1FUUNJRC92QVFVSEh2SWwKQWZZLzM5UWdEU2ltTWpMZnhPTG44NllyR1EvWHpkQVpBaUFpUmlyZmlMdzVGbXBpRDhtYmlmRjV4bzdFUzdqNApaUWQyT0FUNCt5OWE0Zz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K", + }, + }) + + e = &enroller.SWEnroller{ + Client: caClient, + } + }) + + Context("enroll", func() { + It("returns no error on successfull enroll", func() { + resp, err := e.Enroll() + Expect(err).NotTo(HaveOccurred()) + Expect(resp).NotTo(BeNil()) + }) + }) + + // TODO: Add more tests for error path testing +}) diff --git a/pkg/initializer/common/mocks/cryptovalidator.go b/pkg/initializer/common/mocks/cryptovalidator.go new file mode 100644 index 00000000..91e94a40 --- /dev/null +++ b/pkg/initializer/common/mocks/cryptovalidator.go @@ -0,0 +1,305 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package mocks + +import ( + "sync" + + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type CryptoValidator struct { + CheckClientAuthCryptoStub func(v1.Object, string) error + checkClientAuthCryptoMutex sync.RWMutex + checkClientAuthCryptoArgsForCall []struct { + arg1 v1.Object + arg2 string + } + checkClientAuthCryptoReturns struct { + result1 error + } + checkClientAuthCryptoReturnsOnCall map[int]struct { + result1 error + } + CheckEcertCryptoStub func(v1.Object, string) error + checkEcertCryptoMutex sync.RWMutex + checkEcertCryptoArgsForCall []struct { + arg1 v1.Object + arg2 string + } + checkEcertCryptoReturns struct { + result1 error + } + checkEcertCryptoReturnsOnCall map[int]struct { + result1 error + } + CheckTLSCryptoStub func(v1.Object, string) error + checkTLSCryptoMutex sync.RWMutex + checkTLSCryptoArgsForCall []struct { + arg1 v1.Object + arg2 string + } + checkTLSCryptoReturns struct { + result1 error + } + checkTLSCryptoReturnsOnCall map[int]struct { + result1 error + } + SetHSMEnabledStub func(bool) + setHSMEnabledMutex sync.RWMutex + setHSMEnabledArgsForCall []struct { + arg1 bool + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *CryptoValidator) CheckClientAuthCrypto(arg1 v1.Object, arg2 string) error { + fake.checkClientAuthCryptoMutex.Lock() + ret, specificReturn := fake.checkClientAuthCryptoReturnsOnCall[len(fake.checkClientAuthCryptoArgsForCall)] + fake.checkClientAuthCryptoArgsForCall = append(fake.checkClientAuthCryptoArgsForCall, struct { + arg1 v1.Object + arg2 string + }{arg1, arg2}) + stub := fake.CheckClientAuthCryptoStub + fakeReturns := fake.checkClientAuthCryptoReturns + fake.recordInvocation("CheckClientAuthCrypto", []interface{}{arg1, arg2}) + fake.checkClientAuthCryptoMutex.Unlock() + if stub != nil { + return 
stub(arg1, arg2) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *CryptoValidator) CheckClientAuthCryptoCallCount() int { + fake.checkClientAuthCryptoMutex.RLock() + defer fake.checkClientAuthCryptoMutex.RUnlock() + return len(fake.checkClientAuthCryptoArgsForCall) +} + +func (fake *CryptoValidator) CheckClientAuthCryptoCalls(stub func(v1.Object, string) error) { + fake.checkClientAuthCryptoMutex.Lock() + defer fake.checkClientAuthCryptoMutex.Unlock() + fake.CheckClientAuthCryptoStub = stub +} + +func (fake *CryptoValidator) CheckClientAuthCryptoArgsForCall(i int) (v1.Object, string) { + fake.checkClientAuthCryptoMutex.RLock() + defer fake.checkClientAuthCryptoMutex.RUnlock() + argsForCall := fake.checkClientAuthCryptoArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *CryptoValidator) CheckClientAuthCryptoReturns(result1 error) { + fake.checkClientAuthCryptoMutex.Lock() + defer fake.checkClientAuthCryptoMutex.Unlock() + fake.CheckClientAuthCryptoStub = nil + fake.checkClientAuthCryptoReturns = struct { + result1 error + }{result1} +} + +func (fake *CryptoValidator) CheckClientAuthCryptoReturnsOnCall(i int, result1 error) { + fake.checkClientAuthCryptoMutex.Lock() + defer fake.checkClientAuthCryptoMutex.Unlock() + fake.CheckClientAuthCryptoStub = nil + if fake.checkClientAuthCryptoReturnsOnCall == nil { + fake.checkClientAuthCryptoReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.checkClientAuthCryptoReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *CryptoValidator) CheckEcertCrypto(arg1 v1.Object, arg2 string) error { + fake.checkEcertCryptoMutex.Lock() + ret, specificReturn := fake.checkEcertCryptoReturnsOnCall[len(fake.checkEcertCryptoArgsForCall)] + fake.checkEcertCryptoArgsForCall = append(fake.checkEcertCryptoArgsForCall, struct { + arg1 v1.Object + arg2 string + }{arg1, arg2}) + stub := fake.CheckEcertCryptoStub + fakeReturns := fake.checkEcertCryptoReturns + fake.recordInvocation("CheckEcertCrypto", []interface{}{arg1, arg2}) + fake.checkEcertCryptoMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *CryptoValidator) CheckEcertCryptoCallCount() int { + fake.checkEcertCryptoMutex.RLock() + defer fake.checkEcertCryptoMutex.RUnlock() + return len(fake.checkEcertCryptoArgsForCall) +} + +func (fake *CryptoValidator) CheckEcertCryptoCalls(stub func(v1.Object, string) error) { + fake.checkEcertCryptoMutex.Lock() + defer fake.checkEcertCryptoMutex.Unlock() + fake.CheckEcertCryptoStub = stub +} + +func (fake *CryptoValidator) CheckEcertCryptoArgsForCall(i int) (v1.Object, string) { + fake.checkEcertCryptoMutex.RLock() + defer fake.checkEcertCryptoMutex.RUnlock() + argsForCall := fake.checkEcertCryptoArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *CryptoValidator) CheckEcertCryptoReturns(result1 error) { + fake.checkEcertCryptoMutex.Lock() + defer fake.checkEcertCryptoMutex.Unlock() + fake.CheckEcertCryptoStub = nil + fake.checkEcertCryptoReturns = struct { + result1 error + }{result1} +} + +func (fake *CryptoValidator) CheckEcertCryptoReturnsOnCall(i int, result1 error) { + fake.checkEcertCryptoMutex.Lock() + defer fake.checkEcertCryptoMutex.Unlock() + fake.CheckEcertCryptoStub = nil + if fake.checkEcertCryptoReturnsOnCall == nil { + fake.checkEcertCryptoReturnsOnCall = make(map[int]struct { + result1 error + }) + } + 
fake.checkEcertCryptoReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *CryptoValidator) CheckTLSCrypto(arg1 v1.Object, arg2 string) error { + fake.checkTLSCryptoMutex.Lock() + ret, specificReturn := fake.checkTLSCryptoReturnsOnCall[len(fake.checkTLSCryptoArgsForCall)] + fake.checkTLSCryptoArgsForCall = append(fake.checkTLSCryptoArgsForCall, struct { + arg1 v1.Object + arg2 string + }{arg1, arg2}) + stub := fake.CheckTLSCryptoStub + fakeReturns := fake.checkTLSCryptoReturns + fake.recordInvocation("CheckTLSCrypto", []interface{}{arg1, arg2}) + fake.checkTLSCryptoMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *CryptoValidator) CheckTLSCryptoCallCount() int { + fake.checkTLSCryptoMutex.RLock() + defer fake.checkTLSCryptoMutex.RUnlock() + return len(fake.checkTLSCryptoArgsForCall) +} + +func (fake *CryptoValidator) CheckTLSCryptoCalls(stub func(v1.Object, string) error) { + fake.checkTLSCryptoMutex.Lock() + defer fake.checkTLSCryptoMutex.Unlock() + fake.CheckTLSCryptoStub = stub +} + +func (fake *CryptoValidator) CheckTLSCryptoArgsForCall(i int) (v1.Object, string) { + fake.checkTLSCryptoMutex.RLock() + defer fake.checkTLSCryptoMutex.RUnlock() + argsForCall := fake.checkTLSCryptoArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *CryptoValidator) CheckTLSCryptoReturns(result1 error) { + fake.checkTLSCryptoMutex.Lock() + defer fake.checkTLSCryptoMutex.Unlock() + fake.CheckTLSCryptoStub = nil + fake.checkTLSCryptoReturns = struct { + result1 error + }{result1} +} + +func (fake *CryptoValidator) CheckTLSCryptoReturnsOnCall(i int, result1 error) { + fake.checkTLSCryptoMutex.Lock() + defer fake.checkTLSCryptoMutex.Unlock() + fake.CheckTLSCryptoStub = nil + if fake.checkTLSCryptoReturnsOnCall == nil { + fake.checkTLSCryptoReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.checkTLSCryptoReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *CryptoValidator) SetHSMEnabled(arg1 bool) { + fake.setHSMEnabledMutex.Lock() + fake.setHSMEnabledArgsForCall = append(fake.setHSMEnabledArgsForCall, struct { + arg1 bool + }{arg1}) + stub := fake.SetHSMEnabledStub + fake.recordInvocation("SetHSMEnabled", []interface{}{arg1}) + fake.setHSMEnabledMutex.Unlock() + if stub != nil { + fake.SetHSMEnabledStub(arg1) + } +} + +func (fake *CryptoValidator) SetHSMEnabledCallCount() int { + fake.setHSMEnabledMutex.RLock() + defer fake.setHSMEnabledMutex.RUnlock() + return len(fake.setHSMEnabledArgsForCall) +} + +func (fake *CryptoValidator) SetHSMEnabledCalls(stub func(bool)) { + fake.setHSMEnabledMutex.Lock() + defer fake.setHSMEnabledMutex.Unlock() + fake.SetHSMEnabledStub = stub +} + +func (fake *CryptoValidator) SetHSMEnabledArgsForCall(i int) bool { + fake.setHSMEnabledMutex.RLock() + defer fake.setHSMEnabledMutex.RUnlock() + argsForCall := fake.setHSMEnabledArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *CryptoValidator) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.checkClientAuthCryptoMutex.RLock() + defer fake.checkClientAuthCryptoMutex.RUnlock() + fake.checkEcertCryptoMutex.RLock() + defer fake.checkEcertCryptoMutex.RUnlock() + fake.checkTLSCryptoMutex.RLock() + defer fake.checkTLSCryptoMutex.RUnlock() + fake.setHSMEnabledMutex.RLock() + defer fake.setHSMEnabledMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for 
key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *CryptoValidator) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ common.CryptoValidator = new(CryptoValidator) diff --git a/pkg/initializer/common/mspparser/mspparser.go b/pkg/initializer/common/mspparser/mspparser.go new file mode 100644 index 00000000..3e56a8e0 --- /dev/null +++ b/pkg/initializer/common/mspparser/mspparser.go @@ -0,0 +1,110 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package mspparser + +import ( + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + "github.com/pkg/errors" + + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var log = logf.Log.WithName("peer_init_msp_parser") + +type MSPParser struct { + Config *current.MSP +} + +func New(cfg *current.MSP) *MSPParser { + return &MSPParser{ + Config: cfg, + } +} + +func (m *MSPParser) GetCrypto() (*config.Response, error) { + return m.Parse() +} + +func (m *MSPParser) Parse() (*config.Response, error) { + resp := &config.Response{} + + certBytes, err := util.Base64ToBytes(m.Config.SignCerts) + if err != nil { + return nil, errors.Wrap(err, "failed to parse signcert") + } + resp.SignCert = certBytes + + keyBytes, err := util.Base64ToBytes(m.Config.KeyStore) + if err != nil { + return nil, errors.Wrap(err, "failed to parse keystore") + } + resp.Keystore = keyBytes + + for _, adminCert := range m.Config.AdminCerts { + bytes, err := util.Base64ToBytes(adminCert) + if err != nil { + return nil, errors.Wrap(err, "failed to parse admin cert") + } + resp.AdminCerts = append(resp.AdminCerts, bytes) + } + + for _, interCert := range m.Config.IntermediateCerts { + bytes, err := util.Base64ToBytes(interCert) + if err != nil { + return nil, errors.Wrap(err, "failed to parse intermediate cert") + } + resp.IntermediateCerts = append(resp.IntermediateCerts, bytes) + } + + for _, caCert := range m.Config.CACerts { + bytes, err := util.Base64ToBytes(caCert) + if err != nil { + return nil, errors.Wrap(err, "failed to parse ca cert") + } + resp.CACerts = append(resp.CACerts, bytes) + } + + return resp, nil +} + +// MSP parser requires no interaction with CA, ping CA is a no-op +func (m *MSPParser) PingCA() error { + // no-op + return nil +} + +func (m *MSPParser) Validate() error { + cfg := m.Config + + if cfg.KeyStore == "" { + return errors.New("unable to parse MSP, keystore not specified") + } + + if cfg.SignCerts == "" { + 
return errors.New("unable to parse MSP, signcert not specified") + } + + if len(cfg.CACerts) == 0 { + return errors.New("unable to parse MSP, ca certs not specified") + } + + return nil +} diff --git a/pkg/initializer/common/mspparser/mspparser_suite_test.go b/pkg/initializer/common/mspparser/mspparser_suite_test.go new file mode 100644 index 00000000..84288193 --- /dev/null +++ b/pkg/initializer/common/mspparser/mspparser_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package mspparser_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestMspparser(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Mspparser Suite") +} diff --git a/pkg/initializer/common/mspparser/mspparser_test.go b/pkg/initializer/common/mspparser/mspparser_test.go new file mode 100644 index 00000000..2c573d44 --- /dev/null +++ b/pkg/initializer/common/mspparser/mspparser_test.go @@ -0,0 +1,64 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package mspparser_test + +import ( + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/mspparser" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +const ( + testcert = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNpVENDQWkrZ0F3SUJBZ0lVRkd3N0RjK0QvZUoyY08wOHd6d2tialIzK1M4d0NnWUlLb1pJemowRUF3SXcKYURFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJrd0Z3WURWUVFERXhCbVlXSnlhV010ClkyRXRjMlZ5ZG1WeU1CNFhEVEU1TVRBd09URTBNakF3TUZvWERUSXdNVEF3T0RFME1qQXdNRm93YnpFTE1Ba0cKQTFVRUJoTUNWVk14RnpBVkJnTlZCQWdURGs1dmNuUm9JRU5oY205c2FXNWhNUlF3RWdZRFZRUUtFd3RJZVhCbApjbXhsWkdkbGNqRVBNQTBHQTFVRUN4TUdSbUZpY21sak1TQXdIZ1lEVlFRREV4ZFRZV0ZrY3kxTllXTkNiMjlyCkxWQnlieTVzYjJOaGJEQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJBK0JBRzhZakJvTllabGgKRjFrVHNUbHd6VERDQTJocDhZTXI5Ky8vbEd0NURoSGZVT1c3bkhuSW1USHlPRjJQVjFPcVRuUWhUbWpLYTdaQwpqeU9BUWxLamdhOHdnYXd3RGdZRFZSMFBBUUgvQkFRREFnT29NQjBHQTFVZEpRUVdNQlFHQ0NzR0FRVUZCd01CCkJnZ3JCZ0VGQlFjREFqQU1CZ05WSFJNQkFmOEVBakFBTUIwR0ExVWREZ1FXQkJTbHJjL0lNQkxvMzR0UktvWnEKNTQreDIyYWEyREFmQmdOVkhTTUVHREFXZ0JSWmpxT3RQZWJzSFI2UjBNQUhrNnd4ei85UFZqQXRCZ05WSFJFRQpKakFrZ2hkVFlXRmtjeTFOWVdOQ2IyOXJMVkJ5Ynk1c2IyTmhiSUlKYkc5allXeG9iM04wTUFvR0NDcUdTTTQ5CkJBTUNBMGdBTUVVQ0lRRGR0Y1QwUE9FQXJZKzgwdEhmWUwvcXBiWWoxMGU2eWlPWlpUQ29wY25mUVFJZ1FNQUQKaFc3T0NSUERNd3lqKzNhb015d2hFenFHYy9jRDJSU2V5ekRiRjFFPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==" + testkey = "LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JR0hBZ0VBTUJNR0J5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEJHMHdhd0lCQVFRZ3hRUXdSVFFpVUcwREo1UHoKQTJSclhIUEtCelkxMkxRa0MvbVlveWo1bEhDaFJBTkNBQVN5bE1YLzFqdDlmUGt1RTZ0anpvSTlQbGt4LzZuVQpCMHIvMU56TTdrYnBjUk8zQ3RIeXQ2TXlQR21FOUZUN29pYXphU3J1TW9JTDM0VGdBdUpIOU9ZWQotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg==" +) + +var _ = Describe("Enrolling the Peer", func() { + var ( + parser *mspparser.MSPParser + config *current.MSP + ) + + BeforeEach(func() { + config = ¤t.MSP{ + KeyStore: testkey, + SignCerts: testcert, + AdminCerts: []string{testcert}, + CACerts: []string{testcert}, + } + + parser = mspparser.New(config) + Expect(parser).NotTo(BeNil()) + }) + + Context("parses peer MSP", func() { + It("returns an error if value passed in base64", func() { + parser.Config.SignCerts = "xyz" + _, err := parser.Parse() + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to parse base64 string")) + }) + + It("enrolls with CA for enrollment certificate", func() { + _, err := parser.Parse() + Expect(err).NotTo(HaveOccurred()) + }) + }) +}) diff --git a/pkg/initializer/common/secretmanager/secretmanager.go b/pkg/initializer/common/secretmanager/secretmanager.go new file mode 100644 index 00000000..d2d6f5ca --- /dev/null +++ b/pkg/initializer/common/secretmanager/secretmanager.go @@ -0,0 +1,428 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package secretmanager + +import ( + "context" + "fmt" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var log = logf.Log.WithName("secret_manager") + +type SecretManager struct { + Client k8sclient.Client + Scheme *runtime.Scheme + GetLabels func(instance v1.Object) map[string]string +} + +func New(client k8sclient.Client, scheme *runtime.Scheme, labels func(instance v1.Object) map[string]string) *SecretManager { + return &SecretManager{ + Client: client, + Scheme: scheme, + GetLabels: labels, + } +} + +func (s *SecretManager) GenerateSecrets(prefix common.SecretType, instance v1.Object, crypto *config.Response) error { + if crypto == nil { + return nil + } + + if prefix != common.TLS { + err := s.CreateAdminSecret(fmt.Sprintf("%s-%s-%s", prefix, instance.GetName(), "admincerts"), instance, crypto.AdminCerts) + if err != nil { + return errors.Wrap(err, "failed to create admin certs secret") + } + } + + err := s.CreateCACertsSecret(fmt.Sprintf("%s-%s-%s", prefix, instance.GetName(), "cacerts"), instance, crypto.CACerts) + if err != nil { + return errors.Wrap(err, "failed to create ca certs secret") + } + + err = s.CreateIntermediateCertsSecret(fmt.Sprintf("%s-%s-%s", prefix, instance.GetName(), "intercerts"), instance, crypto.IntermediateCerts) + if err != nil { + return errors.Wrap(err, "failed to create intermediate ca certs secret") + } + + err = s.CreateSignCert(fmt.Sprintf("%s-%s-%s", prefix, instance.GetName(), "signcert"), instance, crypto.SignCert) + if err != nil { + return errors.Wrap(err, "failed to create signing cert secret") + } + + err = s.CreateKey(fmt.Sprintf("%s-%s-%s", prefix, instance.GetName(), "keystore"), instance, crypto.Keystore) + if err != nil { + return errors.Wrap(err, "failed to create key secret") + } + + return nil +} + +func (s *SecretManager) UpdateSecrets(prefix common.SecretType, instance v1.Object, crypto *config.Response) error { + // AdminCert updates are checked in base Initialize() code + + err := s.CreateCACertsSecret(fmt.Sprintf("%s-%s-%s", prefix, instance.GetName(), "cacerts"), instance, crypto.CACerts) + if err != nil { + return errors.Wrap(err, "failed to create ca certs secret") + } + + err = s.CreateIntermediateCertsSecret(fmt.Sprintf("%s-%s-%s", prefix, instance.GetName(), "intercerts"), instance, crypto.IntermediateCerts) + if err != nil { + return errors.Wrap(err, "failed to create intermediate ca certs secret") + } + + err = s.CreateSignCert(fmt.Sprintf("%s-%s-%s", prefix, instance.GetName(), "signcert"), instance, crypto.SignCert) + if err != nil { + return errors.Wrap(err, "failed to create signing cert secret") + } + + err = s.CreateKey(fmt.Sprintf("%s-%s-%s", prefix, instance.GetName(), "keystore"), instance, crypto.Keystore) + if err != nil { + return errors.Wrap(err, "failed to create key secret") + } + + return nil +} + +func (s *SecretManager) CreateAdminSecret(name string, instance v1.Object, adminCerts [][]byte) error { + if len(adminCerts) == 0 || string(adminCerts[0]) == "" { + return nil + } + + data := 
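/* admin certs are stored under keys of the form admincert-<index>.pem */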
s.GetCertsData("admincert", adminCerts) + err := s.CreateOrUpdateSecret(instance, name, data) + if err != nil { + return err + } + + return nil +} + +func (s *SecretManager) CreateCACertsSecret(name string, instance v1.Object, caCerts [][]byte) error { + if len(caCerts) == 0 { + return nil + } + + data := s.GetCertsData("cacert", caCerts) + err := s.CreateOrUpdateSecret(instance, name, data) + if err != nil { + return err + } + + return nil +} + +func (s *SecretManager) CreateIntermediateCertsSecret(name string, instance v1.Object, interCerts [][]byte) error { + if len(interCerts) == 0 { + return nil + } + + data := s.GetCertsData("intercert", interCerts) + err := s.CreateOrUpdateSecret(instance, name, data) + if err != nil { + return err + } + + return nil +} + +func (s *SecretManager) CreateSignCert(name string, instance v1.Object, cert []byte) error { + if cert == nil || len(cert) == 0 { + return nil + } + + data := map[string][]byte{ + "cert.pem": cert, + } + err := s.CreateOrUpdateSecret(instance, name, data) + if err != nil { + return err + } + + return nil +} + +func (s *SecretManager) CreateKey(name string, instance v1.Object, key []byte) error { + if key == nil { + return nil + } + + data := map[string][]byte{ + "key.pem": key, + } + err := s.CreateOrUpdateSecret(instance, name, data) + if err != nil { + return err + } + + return nil +} + +func (s *SecretManager) CreateOrUpdateSecret(instance v1.Object, name string, data map[string][]byte) error { + log.Info(fmt.Sprintf("Create/Update secret '%s'", name)) + + secret := s.BuildSecret(instance, name, data, s.GetLabels(instance)) + err := s.Client.CreateOrUpdate(context.TODO(), secret, k8sclient.CreateOrUpdateOption{ + Owner: instance, + Scheme: s.Scheme, + }) + if err != nil { + return err + } + + return nil +} + +func (s *SecretManager) UpdateAdminCertSecret(instance v1.Object, secretSpec *current.SecretSpec) error { + name := fmt.Sprintf("ecert-%s-admincerts", instance.GetName()) + + adminCerts := common.GetAdminCertsFromSpec(secretSpec) + + if len(adminCerts) == 0 || string(adminCerts[0]) == "" { + return nil + } + + adminCertsBytes, err := common.ConvertCertsToBytes(adminCerts) + if err != nil { + return err + } + + data := s.GetCertsData("admincert", adminCertsBytes) + err = s.CreateOrUpdateSecret(instance, name, data) + if err != nil { + return err + } + + return nil +} + +func (s *SecretManager) BuildSecret(instance v1.Object, name string, data map[string][]byte, labels map[string]string) *corev1.Secret { + return &corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Name: name, + Namespace: instance.GetNamespace(), + Labels: labels, + }, + Data: data, + Type: corev1.SecretTypeOpaque, + } +} + +func (s *SecretManager) GetSecret(name string, instance v1.Object) (*corev1.Secret, error) { + n := types.NamespacedName{ + Name: name, + Namespace: instance.GetNamespace(), + } + + secret := &corev1.Secret{} + err := s.Client.Get(context.TODO(), n, secret) + if err != nil { + if k8serrors.IsNotFound(err) { + return nil, nil + } + return nil, err + } + + return secret, nil +} + +func (s *SecretManager) GetCertsData(certType string, certs [][]byte) map[string][]byte { + data := map[string][]byte{} + for i, cert := range certs { + if string(cert) == "" { + continue + } + data[fmt.Sprintf("%s-%d.pem", certType, i)] = cert + } + + return data +} + +func (s *SecretManager) DeleteSecrets(prefix string, instance v1.Object, name string) error { + secret := &corev1.Secret{} + secret.Namespace = instance.GetNamespace() + + secret.Name = 
fmt.Sprintf("%s-%s-%s", prefix, name, "admincerts") + err := s.Client.Delete(context.TODO(), secret) + if err != nil { + if !k8serrors.IsNotFound(err) { + return errors.Wrapf(err, "failed to delete secret '%s'", secret.Name) + } + } + + secret.Name = fmt.Sprintf("%s-%s-%s", prefix, name, "cacerts") + err = s.Client.Delete(context.TODO(), secret) + if err != nil { + if !k8serrors.IsNotFound(err) { + return errors.Wrapf(err, "failed to delete secret '%s'", secret.Name) + } + } + + secret.Name = fmt.Sprintf("%s-%s-%s", prefix, name, "intercerts") + err = s.Client.Delete(context.TODO(), secret) + if err != nil { + if !k8serrors.IsNotFound(err) { + return errors.Wrapf(err, "failed to delete secret '%s'", secret.Name) + } + } + + secret.Name = fmt.Sprintf("%s-%s-%s", prefix, name, "signcert") + err = s.Client.Delete(context.TODO(), secret) + if err != nil { + if !k8serrors.IsNotFound(err) { + return errors.Wrapf(err, "failed to delete secret '%s'", secret.Name) + } + } + + secret.Name = fmt.Sprintf("%s-%s-%s", prefix, name, "keystore") + err = s.Client.Delete(context.TODO(), secret) + if err != nil { + if !k8serrors.IsNotFound(err) { + return errors.Wrapf(err, "failed to delete secret '%s'", secret.Name) + } + } + + return nil +} + +func (s *SecretManager) GetCryptoFromSecrets(prefix common.SecretType, instance v1.Object) (*config.Response, error) { + resp := &config.Response{} + + admincerts, err := s.GetSecret(fmt.Sprintf("%s-%s-%s", prefix, instance.GetName(), "admincerts"), instance) + if err != nil { + return nil, err + } + if admincerts != nil { + resp.AdminCerts = s.GetCertBytesFromData(admincerts.Data) + } + + cacerts, err := s.GetSecret(fmt.Sprintf("%s-%s-%s", prefix, instance.GetName(), "cacerts"), instance) + if err != nil { + return nil, err + } + if cacerts != nil { + resp.CACerts = s.GetCertBytesFromData(cacerts.Data) + } + + intercerts, err := s.GetSecret(fmt.Sprintf("%s-%s-%s", prefix, instance.GetName(), "intercerts"), instance) + if err != nil { + return nil, err + } + if intercerts != nil { + resp.IntermediateCerts = s.GetCertBytesFromData(intercerts.Data) + } + + signcert, err := s.GetSecret(fmt.Sprintf("%s-%s-%s", prefix, instance.GetName(), "signcert"), instance) + if err != nil { + return nil, err + } + if signcert != nil { + resp.SignCert = signcert.Data["cert.pem"] + } + + keystore, err := s.GetSecret(fmt.Sprintf("%s-%s-%s", prefix, instance.GetName(), "keystore"), instance) + if err != nil { + return nil, err + } + if keystore != nil { + resp.Keystore = keystore.Data["key.pem"] + } + + return resp, nil +} + +func (s *SecretManager) GetCertBytesFromData(data map[string][]byte) [][]byte { + bytes := [][]byte{} + for _, cert := range data { + bytes = append(bytes, cert) + } + return bytes +} + +func (s *SecretManager) GenerateSecretsFromResponse(instance v1.Object, cryptoResponse *config.CryptoResponse) error { + if cryptoResponse != nil { + err := s.GenerateSecrets("ecert", instance, cryptoResponse.Enrollment) + if err != nil { + return errors.Wrap(err, "failed to generate ecert secrets") + } + + err = s.GenerateSecrets("tls", instance, cryptoResponse.TLS) + if err != nil { + return errors.Wrap(err, "failed to generate tls secrets") + } + + err = s.GenerateSecrets("clientauth", instance, cryptoResponse.ClientAuth) + if err != nil { + return errors.Wrap(err, "failed to generate client auth secrets") + } + } + return nil +} + +func (s *SecretManager) UpdateSecretsFromResponse(instance v1.Object, cryptoResponse *config.CryptoResponse) error { + if cryptoResponse != nil { + 
err := s.UpdateSecrets("ecert", instance, cryptoResponse.Enrollment) + if err != nil { + return errors.Wrap(err, "failed to update ecert secrets") + } + + err = s.UpdateSecrets("tls", instance, cryptoResponse.TLS) + if err != nil { + return errors.Wrap(err, "failed to update tls secrets") + } + + err = s.UpdateSecrets("clientauth", instance, cryptoResponse.ClientAuth) + if err != nil { + return errors.Wrap(err, "failed to update client auth secrets") + } + } + return nil +} + +func (s *SecretManager) GetCryptoResponseFromSecrets(instance v1.Object) (*config.CryptoResponse, error) { + var err error + cryptoResponse := &config.CryptoResponse{} + + cryptoResponse.Enrollment, err = s.GetCryptoFromSecrets("ecert", instance) + if err != nil { + return nil, errors.Wrap(err, "failed to get ecert crypto") + } + cryptoResponse.TLS, err = s.GetCryptoFromSecrets("tls", instance) + if err != nil { + return nil, errors.Wrap(err, "failed to get tls crypto") + } + cryptoResponse.ClientAuth, err = s.GetCryptoFromSecrets("clientauth", instance) + if err != nil { + return nil, errors.Wrap(err, "failed to get client auth crypto") + } + + return cryptoResponse, nil +} diff --git a/pkg/initializer/common/secretmanager/secretmanager_suite_test.go b/pkg/initializer/common/secretmanager/secretmanager_suite_test.go new file mode 100644 index 00000000..d5256d7a --- /dev/null +++ b/pkg/initializer/common/secretmanager/secretmanager_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package secretmanager_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestSecretmanager(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Secretmanager Suite") +} diff --git a/pkg/initializer/common/secretmanager/secretmanager_test.go b/pkg/initializer/common/secretmanager/secretmanager_test.go new file mode 100644 index 00000000..8be1d14d --- /dev/null +++ b/pkg/initializer/common/secretmanager/secretmanager_test.go @@ -0,0 +1,194 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package secretmanager_test + +import ( + "context" + "errors" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + controllermocks "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/secretmanager" +) + +var _ = Describe("Secretmanager", func() { + Context("generate secrets", func() { + var ( + resp *config.Response + mockClient *controllermocks.Client + instance v1.Object + secretManager *secretmanager.SecretManager + ) + + BeforeEach(func() { + mockClient = &controllermocks.Client{} + instance = ¤t.IBPPeer{} + + getLabels := func(instance v1.Object) map[string]string { + return map[string]string{} + } + secretManager = secretmanager.New(mockClient, runtime.NewScheme(), getLabels) + + resp = &config.Response{ + CACerts: [][]byte{[]byte("cacert")}, + IntermediateCerts: [][]byte{[]byte("intercert")}, + AdminCerts: [][]byte{[]byte("admincert")}, + SignCert: []byte("signcert"), + Keystore: []byte("key"), + } + + mockClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj client.Object) error { + o := obj.(*corev1.Secret) + switch types.Name { + case "ecert-" + instance.GetName() + "-signcert": + o.Name = "ecert-" + instance.GetName() + "-signcert" + o.Namespace = instance.GetNamespace() + o.Data = map[string][]byte{"cert.pem": []byte("signcert")} + case "ecert-" + instance.GetName() + "-keystore": + o.Name = "ecert-" + instance.GetName() + "-keystore" + o.Namespace = instance.GetNamespace() + o.Data = map[string][]byte{"key.pem": []byte("key")} + case "ecert-" + instance.GetName() + "-admincerts": + o.Name = "ecert-" + instance.GetName() + "-admincerts" + o.Namespace = instance.GetNamespace() + o.Data = map[string][]byte{ + "admincert-0.pem": []byte("admincert"), + "admincert-1.pem": []byte("admincert"), + } + } + return nil + } + }) + + Context("admin certs secret", func() { + It("returns an error on failure", func() { + msg := "admin certs error" + mockClient.CreateOrUpdateReturnsOnCall(0, errors.New(msg)) + + err := secretManager.GenerateSecrets("ecert", instance, resp) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to create admin certs secret: " + msg)) + }) + + It("generates ecert admin cert secret", func() { + err := secretManager.GenerateSecrets("ecert", instance, resp) + Expect(err).NotTo(HaveOccurred()) + Expect(mockClient.CreateOrUpdateCallCount()).To(Equal(5)) + }) + + It("does not generate tls admin cert secret", func() { + err := secretManager.GenerateSecrets("tls", instance, resp) + Expect(err).NotTo(HaveOccurred()) + Expect(mockClient.CreateOrUpdateCallCount()).To(Equal(4)) + }) + }) + + Context("ca certs secret", func() { + It("returns an error on failure", func() { + msg := "ca certs error" + mockClient.CreateOrUpdateReturnsOnCall(1, errors.New(msg)) + + err := secretManager.GenerateSecrets("ecert", instance, resp) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to create ca certs secret: " + msg)) + }) + + It("generates", func() { + err := secretManager.GenerateSecrets("ecert", instance, resp) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("intermediate certs secret", func() { + It("returns an error on failure", func() { + msg := "intermediate certs error" 
+ mockClient.CreateOrUpdateReturnsOnCall(2, errors.New(msg)) + + err := secretManager.GenerateSecrets("ecert", instance, resp) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to create intermediate ca certs secret: " + msg)) + }) + + It("generates", func() { + err := secretManager.GenerateSecrets("ecert", instance, resp) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("sign certs secret", func() { + It("returns an error on failure", func() { + msg := "sign certs error" + mockClient.CreateOrUpdateReturnsOnCall(3, errors.New(msg)) + + err := secretManager.GenerateSecrets("ecert", instance, resp) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to create signing cert secret: " + msg)) + }) + + It("generates", func() { + err := secretManager.GenerateSecrets("ecert", instance, resp) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("key secret", func() { + It("returns an error on failure", func() { + msg := "key error" + mockClient.CreateOrUpdateReturnsOnCall(4, errors.New(msg)) + + err := secretManager.GenerateSecrets("ecert", instance, resp) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to create key secret: " + msg)) + }) + + It("generates", func() { + err := secretManager.GenerateSecrets("ecert", instance, resp) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("get crypto from secret", func() { + It("returns an error if failed to get secret", func() { + mockClient.GetReturns(errors.New("get error")) + _, err := secretManager.GetCryptoFromSecrets("ecert", instance) + Expect(err).To(HaveOccurred()) + }) + + It("returns crypto response from tls cert secrets", func() { + tlscrypto, err := secretManager.GetCryptoFromSecrets("ecert", instance) + Expect(err).NotTo(HaveOccurred()) + Expect(tlscrypto).NotTo(BeNil()) + + Expect(tlscrypto.AdminCerts).To(Equal([][]byte{[]byte("admincert"), []byte("admincert")})) + Expect(tlscrypto.SignCert).To(Equal([]byte("signcert"))) + Expect(tlscrypto.Keystore).To(Equal([]byte("key"))) + }) + }) + }) + +}) diff --git a/pkg/initializer/cryptogen/bccsp.go b/pkg/initializer/cryptogen/bccsp.go new file mode 100644 index 00000000..dc823372 --- /dev/null +++ b/pkg/initializer/cryptogen/bccsp.go @@ -0,0 +1,80 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cryptogen + +import ( + "strings" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + common "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + "github.com/IBM-Blockchain/fabric-operator/version" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +//go:generate counterfeiter -o mocks/instance.go -fake-name Instance . 
Instance + +type Instance interface { + runtime.Object + metav1.Object + IsHSMEnabled() bool + UsingHSMProxy() bool + GetConfigOverride() (interface{}, error) +} + +//go:generate counterfeiter -o mocks/config.go -fake-name Config . Config + +type Config interface { + SetDefaultKeyStore() + SetPKCS11Defaults(bool) + GetBCCSPSection() *common.BCCSP +} + +func InitBCCSP(instance Instance) *common.BCCSP { + if !instance.IsHSMEnabled() { + return nil + } + + co, err := instance.GetConfigOverride() + if err != nil { + return nil + } + + configOverride, ok := co.(Config) + if !ok { + return nil + } + + if instance.IsHSMEnabled() { + configOverride.SetPKCS11Defaults(instance.UsingHSMProxy()) + + switch i := instance.(type) { + case *current.IBPPeer: + // If peer is older than 1.4.7 than we need to set msp/keystore path + // even when using PKCS11 (HSM) other wise fabric peer refuses to start + peerTag := strings.Split(i.Spec.Images.PeerTag, "-")[0] + if version.String(peerTag).LessThan(version.V1_4_7) { + configOverride.SetDefaultKeyStore() + } + } + } + + return configOverride.GetBCCSPSection() +} diff --git a/pkg/initializer/cryptogen/mocks/config.go b/pkg/initializer/cryptogen/mocks/config.go new file mode 100644 index 00000000..d1cb451e --- /dev/null +++ b/pkg/initializer/cryptogen/mocks/config.go @@ -0,0 +1,172 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package mocks + +import ( + "sync" + + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/cryptogen" +) + +type Config struct { + GetBCCSPSectionStub func() *common.BCCSP + getBCCSPSectionMutex sync.RWMutex + getBCCSPSectionArgsForCall []struct { + } + getBCCSPSectionReturns struct { + result1 *common.BCCSP + } + getBCCSPSectionReturnsOnCall map[int]struct { + result1 *common.BCCSP + } + SetDefaultKeyStoreStub func() + setDefaultKeyStoreMutex sync.RWMutex + setDefaultKeyStoreArgsForCall []struct { + } + SetPKCS11DefaultsStub func(bool) + setPKCS11DefaultsMutex sync.RWMutex + setPKCS11DefaultsArgsForCall []struct { + arg1 bool + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *Config) GetBCCSPSection() *common.BCCSP { + fake.getBCCSPSectionMutex.Lock() + ret, specificReturn := fake.getBCCSPSectionReturnsOnCall[len(fake.getBCCSPSectionArgsForCall)] + fake.getBCCSPSectionArgsForCall = append(fake.getBCCSPSectionArgsForCall, struct { + }{}) + stub := fake.GetBCCSPSectionStub + fakeReturns := fake.getBCCSPSectionReturns + fake.recordInvocation("GetBCCSPSection", []interface{}{}) + fake.getBCCSPSectionMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Config) GetBCCSPSectionCallCount() int { + fake.getBCCSPSectionMutex.RLock() + defer fake.getBCCSPSectionMutex.RUnlock() + return len(fake.getBCCSPSectionArgsForCall) +} + +func (fake *Config) GetBCCSPSectionCalls(stub func() *common.BCCSP) { + fake.getBCCSPSectionMutex.Lock() + defer fake.getBCCSPSectionMutex.Unlock() + fake.GetBCCSPSectionStub = stub +} + +func (fake *Config) GetBCCSPSectionReturns(result1 *common.BCCSP) { + fake.getBCCSPSectionMutex.Lock() + defer fake.getBCCSPSectionMutex.Unlock() + fake.GetBCCSPSectionStub = nil + fake.getBCCSPSectionReturns = struct { + result1 *common.BCCSP + }{result1} +} + +func (fake *Config) GetBCCSPSectionReturnsOnCall(i int, result1 *common.BCCSP) { + fake.getBCCSPSectionMutex.Lock() + defer fake.getBCCSPSectionMutex.Unlock() + 
fake.GetBCCSPSectionStub = nil + if fake.getBCCSPSectionReturnsOnCall == nil { + fake.getBCCSPSectionReturnsOnCall = make(map[int]struct { + result1 *common.BCCSP + }) + } + fake.getBCCSPSectionReturnsOnCall[i] = struct { + result1 *common.BCCSP + }{result1} +} + +func (fake *Config) SetDefaultKeyStore() { + fake.setDefaultKeyStoreMutex.Lock() + fake.setDefaultKeyStoreArgsForCall = append(fake.setDefaultKeyStoreArgsForCall, struct { + }{}) + stub := fake.SetDefaultKeyStoreStub + fake.recordInvocation("SetDefaultKeyStore", []interface{}{}) + fake.setDefaultKeyStoreMutex.Unlock() + if stub != nil { + fake.SetDefaultKeyStoreStub() + } +} + +func (fake *Config) SetDefaultKeyStoreCallCount() int { + fake.setDefaultKeyStoreMutex.RLock() + defer fake.setDefaultKeyStoreMutex.RUnlock() + return len(fake.setDefaultKeyStoreArgsForCall) +} + +func (fake *Config) SetDefaultKeyStoreCalls(stub func()) { + fake.setDefaultKeyStoreMutex.Lock() + defer fake.setDefaultKeyStoreMutex.Unlock() + fake.SetDefaultKeyStoreStub = stub +} + +func (fake *Config) SetPKCS11Defaults(arg1 bool) { + fake.setPKCS11DefaultsMutex.Lock() + fake.setPKCS11DefaultsArgsForCall = append(fake.setPKCS11DefaultsArgsForCall, struct { + arg1 bool + }{arg1}) + stub := fake.SetPKCS11DefaultsStub + fake.recordInvocation("SetPKCS11Defaults", []interface{}{arg1}) + fake.setPKCS11DefaultsMutex.Unlock() + if stub != nil { + fake.SetPKCS11DefaultsStub(arg1) + } +} + +func (fake *Config) SetPKCS11DefaultsCallCount() int { + fake.setPKCS11DefaultsMutex.RLock() + defer fake.setPKCS11DefaultsMutex.RUnlock() + return len(fake.setPKCS11DefaultsArgsForCall) +} + +func (fake *Config) SetPKCS11DefaultsCalls(stub func(bool)) { + fake.setPKCS11DefaultsMutex.Lock() + defer fake.setPKCS11DefaultsMutex.Unlock() + fake.SetPKCS11DefaultsStub = stub +} + +func (fake *Config) SetPKCS11DefaultsArgsForCall(i int) bool { + fake.setPKCS11DefaultsMutex.RLock() + defer fake.setPKCS11DefaultsMutex.RUnlock() + argsForCall := fake.setPKCS11DefaultsArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Config) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.getBCCSPSectionMutex.RLock() + defer fake.getBCCSPSectionMutex.RUnlock() + fake.setDefaultKeyStoreMutex.RLock() + defer fake.setDefaultKeyStoreMutex.RUnlock() + fake.setPKCS11DefaultsMutex.RLock() + defer fake.setPKCS11DefaultsMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *Config) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ cryptogen.Config = new(Config) diff --git a/pkg/initializer/cryptogen/mocks/instance.go b/pkg/initializer/cryptogen/mocks/instance.go new file mode 100644 index 00000000..9a74e6aa --- /dev/null +++ b/pkg/initializer/cryptogen/mocks/instance.go @@ -0,0 +1,2050 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package mocks + +import ( + "sync" + + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/cryptogen" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" +) + +type Instance struct { + DeepCopyObjectStub func() runtime.Object + deepCopyObjectMutex sync.RWMutex + deepCopyObjectArgsForCall []struct { + } + deepCopyObjectReturns struct { + result1 runtime.Object + } + deepCopyObjectReturnsOnCall map[int]struct { + result1 runtime.Object + } + GetAnnotationsStub func() map[string]string + getAnnotationsMutex sync.RWMutex + getAnnotationsArgsForCall []struct { + } + getAnnotationsReturns struct { + result1 map[string]string + } + getAnnotationsReturnsOnCall map[int]struct { + result1 map[string]string + } + GetClusterNameStub func() string + getClusterNameMutex sync.RWMutex + getClusterNameArgsForCall []struct { + } + getClusterNameReturns struct { + result1 string + } + getClusterNameReturnsOnCall map[int]struct { + result1 string + } + GetConfigOverrideStub func() (interface{}, error) + getConfigOverrideMutex sync.RWMutex + getConfigOverrideArgsForCall []struct { + } + getConfigOverrideReturns struct { + result1 interface{} + result2 error + } + getConfigOverrideReturnsOnCall map[int]struct { + result1 interface{} + result2 error + } + GetCreationTimestampStub func() v1.Time + getCreationTimestampMutex sync.RWMutex + getCreationTimestampArgsForCall []struct { + } + getCreationTimestampReturns struct { + result1 v1.Time + } + getCreationTimestampReturnsOnCall map[int]struct { + result1 v1.Time + } + GetDeletionGracePeriodSecondsStub func() *int64 + getDeletionGracePeriodSecondsMutex sync.RWMutex + getDeletionGracePeriodSecondsArgsForCall []struct { + } + getDeletionGracePeriodSecondsReturns struct { + result1 *int64 + } + getDeletionGracePeriodSecondsReturnsOnCall map[int]struct { + result1 *int64 + } + GetDeletionTimestampStub func() *v1.Time + getDeletionTimestampMutex sync.RWMutex + getDeletionTimestampArgsForCall []struct { + } + getDeletionTimestampReturns struct { + result1 *v1.Time + } + getDeletionTimestampReturnsOnCall map[int]struct { + result1 *v1.Time + } + GetFinalizersStub func() []string + getFinalizersMutex sync.RWMutex + getFinalizersArgsForCall []struct { + } + getFinalizersReturns struct { + result1 []string + } + getFinalizersReturnsOnCall map[int]struct { + result1 []string + } + GetGenerateNameStub func() string + getGenerateNameMutex sync.RWMutex + getGenerateNameArgsForCall []struct { + } + getGenerateNameReturns struct { + result1 string + } + getGenerateNameReturnsOnCall map[int]struct { + result1 string + } + GetGenerationStub func() int64 + getGenerationMutex sync.RWMutex + getGenerationArgsForCall []struct { + } + getGenerationReturns struct { + result1 int64 + } + getGenerationReturnsOnCall map[int]struct { + result1 int64 + } + GetLabelsStub func() map[string]string + getLabelsMutex sync.RWMutex + getLabelsArgsForCall []struct { + } + getLabelsReturns struct { + result1 map[string]string + } + getLabelsReturnsOnCall map[int]struct { + result1 map[string]string + } + GetManagedFieldsStub func() []v1.ManagedFieldsEntry + getManagedFieldsMutex sync.RWMutex + getManagedFieldsArgsForCall []struct { + } + getManagedFieldsReturns struct { + result1 []v1.ManagedFieldsEntry + } + getManagedFieldsReturnsOnCall map[int]struct { + result1 []v1.ManagedFieldsEntry + } + GetNameStub func() string + getNameMutex sync.RWMutex + getNameArgsForCall []struct { + } + 
getNameReturns struct { + result1 string + } + getNameReturnsOnCall map[int]struct { + result1 string + } + GetNamespaceStub func() string + getNamespaceMutex sync.RWMutex + getNamespaceArgsForCall []struct { + } + getNamespaceReturns struct { + result1 string + } + getNamespaceReturnsOnCall map[int]struct { + result1 string + } + GetObjectKindStub func() schema.ObjectKind + getObjectKindMutex sync.RWMutex + getObjectKindArgsForCall []struct { + } + getObjectKindReturns struct { + result1 schema.ObjectKind + } + getObjectKindReturnsOnCall map[int]struct { + result1 schema.ObjectKind + } + GetOwnerReferencesStub func() []v1.OwnerReference + getOwnerReferencesMutex sync.RWMutex + getOwnerReferencesArgsForCall []struct { + } + getOwnerReferencesReturns struct { + result1 []v1.OwnerReference + } + getOwnerReferencesReturnsOnCall map[int]struct { + result1 []v1.OwnerReference + } + GetResourceVersionStub func() string + getResourceVersionMutex sync.RWMutex + getResourceVersionArgsForCall []struct { + } + getResourceVersionReturns struct { + result1 string + } + getResourceVersionReturnsOnCall map[int]struct { + result1 string + } + GetSelfLinkStub func() string + getSelfLinkMutex sync.RWMutex + getSelfLinkArgsForCall []struct { + } + getSelfLinkReturns struct { + result1 string + } + getSelfLinkReturnsOnCall map[int]struct { + result1 string + } + GetUIDStub func() types.UID + getUIDMutex sync.RWMutex + getUIDArgsForCall []struct { + } + getUIDReturns struct { + result1 types.UID + } + getUIDReturnsOnCall map[int]struct { + result1 types.UID + } + IsHSMEnabledStub func() bool + isHSMEnabledMutex sync.RWMutex + isHSMEnabledArgsForCall []struct { + } + isHSMEnabledReturns struct { + result1 bool + } + isHSMEnabledReturnsOnCall map[int]struct { + result1 bool + } + SetAnnotationsStub func(map[string]string) + setAnnotationsMutex sync.RWMutex + setAnnotationsArgsForCall []struct { + arg1 map[string]string + } + SetClusterNameStub func(string) + setClusterNameMutex sync.RWMutex + setClusterNameArgsForCall []struct { + arg1 string + } + SetCreationTimestampStub func(v1.Time) + setCreationTimestampMutex sync.RWMutex + setCreationTimestampArgsForCall []struct { + arg1 v1.Time + } + SetDeletionGracePeriodSecondsStub func(*int64) + setDeletionGracePeriodSecondsMutex sync.RWMutex + setDeletionGracePeriodSecondsArgsForCall []struct { + arg1 *int64 + } + SetDeletionTimestampStub func(*v1.Time) + setDeletionTimestampMutex sync.RWMutex + setDeletionTimestampArgsForCall []struct { + arg1 *v1.Time + } + SetFinalizersStub func([]string) + setFinalizersMutex sync.RWMutex + setFinalizersArgsForCall []struct { + arg1 []string + } + SetGenerateNameStub func(string) + setGenerateNameMutex sync.RWMutex + setGenerateNameArgsForCall []struct { + arg1 string + } + SetGenerationStub func(int64) + setGenerationMutex sync.RWMutex + setGenerationArgsForCall []struct { + arg1 int64 + } + SetLabelsStub func(map[string]string) + setLabelsMutex sync.RWMutex + setLabelsArgsForCall []struct { + arg1 map[string]string + } + SetManagedFieldsStub func([]v1.ManagedFieldsEntry) + setManagedFieldsMutex sync.RWMutex + setManagedFieldsArgsForCall []struct { + arg1 []v1.ManagedFieldsEntry + } + SetNameStub func(string) + setNameMutex sync.RWMutex + setNameArgsForCall []struct { + arg1 string + } + SetNamespaceStub func(string) + setNamespaceMutex sync.RWMutex + setNamespaceArgsForCall []struct { + arg1 string + } + SetOwnerReferencesStub func([]v1.OwnerReference) + setOwnerReferencesMutex sync.RWMutex + setOwnerReferencesArgsForCall 
[]struct { + arg1 []v1.OwnerReference + } + SetResourceVersionStub func(string) + setResourceVersionMutex sync.RWMutex + setResourceVersionArgsForCall []struct { + arg1 string + } + SetSelfLinkStub func(string) + setSelfLinkMutex sync.RWMutex + setSelfLinkArgsForCall []struct { + arg1 string + } + SetUIDStub func(types.UID) + setUIDMutex sync.RWMutex + setUIDArgsForCall []struct { + arg1 types.UID + } + UsingHSMProxyStub func() bool + usingHSMProxyMutex sync.RWMutex + usingHSMProxyArgsForCall []struct { + } + usingHSMProxyReturns struct { + result1 bool + } + usingHSMProxyReturnsOnCall map[int]struct { + result1 bool + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *Instance) DeepCopyObject() runtime.Object { + fake.deepCopyObjectMutex.Lock() + ret, specificReturn := fake.deepCopyObjectReturnsOnCall[len(fake.deepCopyObjectArgsForCall)] + fake.deepCopyObjectArgsForCall = append(fake.deepCopyObjectArgsForCall, struct { + }{}) + stub := fake.DeepCopyObjectStub + fakeReturns := fake.deepCopyObjectReturns + fake.recordInvocation("DeepCopyObject", []interface{}{}) + fake.deepCopyObjectMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) DeepCopyObjectCallCount() int { + fake.deepCopyObjectMutex.RLock() + defer fake.deepCopyObjectMutex.RUnlock() + return len(fake.deepCopyObjectArgsForCall) +} + +func (fake *Instance) DeepCopyObjectCalls(stub func() runtime.Object) { + fake.deepCopyObjectMutex.Lock() + defer fake.deepCopyObjectMutex.Unlock() + fake.DeepCopyObjectStub = stub +} + +func (fake *Instance) DeepCopyObjectReturns(result1 runtime.Object) { + fake.deepCopyObjectMutex.Lock() + defer fake.deepCopyObjectMutex.Unlock() + fake.DeepCopyObjectStub = nil + fake.deepCopyObjectReturns = struct { + result1 runtime.Object + }{result1} +} + +func (fake *Instance) DeepCopyObjectReturnsOnCall(i int, result1 runtime.Object) { + fake.deepCopyObjectMutex.Lock() + defer fake.deepCopyObjectMutex.Unlock() + fake.DeepCopyObjectStub = nil + if fake.deepCopyObjectReturnsOnCall == nil { + fake.deepCopyObjectReturnsOnCall = make(map[int]struct { + result1 runtime.Object + }) + } + fake.deepCopyObjectReturnsOnCall[i] = struct { + result1 runtime.Object + }{result1} +} + +func (fake *Instance) GetAnnotations() map[string]string { + fake.getAnnotationsMutex.Lock() + ret, specificReturn := fake.getAnnotationsReturnsOnCall[len(fake.getAnnotationsArgsForCall)] + fake.getAnnotationsArgsForCall = append(fake.getAnnotationsArgsForCall, struct { + }{}) + stub := fake.GetAnnotationsStub + fakeReturns := fake.getAnnotationsReturns + fake.recordInvocation("GetAnnotations", []interface{}{}) + fake.getAnnotationsMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetAnnotationsCallCount() int { + fake.getAnnotationsMutex.RLock() + defer fake.getAnnotationsMutex.RUnlock() + return len(fake.getAnnotationsArgsForCall) +} + +func (fake *Instance) GetAnnotationsCalls(stub func() map[string]string) { + fake.getAnnotationsMutex.Lock() + defer fake.getAnnotationsMutex.Unlock() + fake.GetAnnotationsStub = stub +} + +func (fake *Instance) GetAnnotationsReturns(result1 map[string]string) { + fake.getAnnotationsMutex.Lock() + defer fake.getAnnotationsMutex.Unlock() + fake.GetAnnotationsStub = nil + fake.getAnnotationsReturns = struct { + result1 map[string]string + }{result1} +} + +func 
(fake *Instance) GetAnnotationsReturnsOnCall(i int, result1 map[string]string) { + fake.getAnnotationsMutex.Lock() + defer fake.getAnnotationsMutex.Unlock() + fake.GetAnnotationsStub = nil + if fake.getAnnotationsReturnsOnCall == nil { + fake.getAnnotationsReturnsOnCall = make(map[int]struct { + result1 map[string]string + }) + } + fake.getAnnotationsReturnsOnCall[i] = struct { + result1 map[string]string + }{result1} +} + +func (fake *Instance) GetClusterName() string { + fake.getClusterNameMutex.Lock() + ret, specificReturn := fake.getClusterNameReturnsOnCall[len(fake.getClusterNameArgsForCall)] + fake.getClusterNameArgsForCall = append(fake.getClusterNameArgsForCall, struct { + }{}) + stub := fake.GetClusterNameStub + fakeReturns := fake.getClusterNameReturns + fake.recordInvocation("GetClusterName", []interface{}{}) + fake.getClusterNameMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetClusterNameCallCount() int { + fake.getClusterNameMutex.RLock() + defer fake.getClusterNameMutex.RUnlock() + return len(fake.getClusterNameArgsForCall) +} + +func (fake *Instance) GetClusterNameCalls(stub func() string) { + fake.getClusterNameMutex.Lock() + defer fake.getClusterNameMutex.Unlock() + fake.GetClusterNameStub = stub +} + +func (fake *Instance) GetClusterNameReturns(result1 string) { + fake.getClusterNameMutex.Lock() + defer fake.getClusterNameMutex.Unlock() + fake.GetClusterNameStub = nil + fake.getClusterNameReturns = struct { + result1 string + }{result1} +} + +func (fake *Instance) GetClusterNameReturnsOnCall(i int, result1 string) { + fake.getClusterNameMutex.Lock() + defer fake.getClusterNameMutex.Unlock() + fake.GetClusterNameStub = nil + if fake.getClusterNameReturnsOnCall == nil { + fake.getClusterNameReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getClusterNameReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *Instance) GetConfigOverride() (interface{}, error) { + fake.getConfigOverrideMutex.Lock() + ret, specificReturn := fake.getConfigOverrideReturnsOnCall[len(fake.getConfigOverrideArgsForCall)] + fake.getConfigOverrideArgsForCall = append(fake.getConfigOverrideArgsForCall, struct { + }{}) + stub := fake.GetConfigOverrideStub + fakeReturns := fake.getConfigOverrideReturns + fake.recordInvocation("GetConfigOverride", []interface{}{}) + fake.getConfigOverrideMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *Instance) GetConfigOverrideCallCount() int { + fake.getConfigOverrideMutex.RLock() + defer fake.getConfigOverrideMutex.RUnlock() + return len(fake.getConfigOverrideArgsForCall) +} + +func (fake *Instance) GetConfigOverrideCalls(stub func() (interface{}, error)) { + fake.getConfigOverrideMutex.Lock() + defer fake.getConfigOverrideMutex.Unlock() + fake.GetConfigOverrideStub = stub +} + +func (fake *Instance) GetConfigOverrideReturns(result1 interface{}, result2 error) { + fake.getConfigOverrideMutex.Lock() + defer fake.getConfigOverrideMutex.Unlock() + fake.GetConfigOverrideStub = nil + fake.getConfigOverrideReturns = struct { + result1 interface{} + result2 error + }{result1, result2} +} + +func (fake *Instance) GetConfigOverrideReturnsOnCall(i int, result1 interface{}, result2 error) { + fake.getConfigOverrideMutex.Lock() + defer fake.getConfigOverrideMutex.Unlock() + 
fake.GetConfigOverrideStub = nil + if fake.getConfigOverrideReturnsOnCall == nil { + fake.getConfigOverrideReturnsOnCall = make(map[int]struct { + result1 interface{} + result2 error + }) + } + fake.getConfigOverrideReturnsOnCall[i] = struct { + result1 interface{} + result2 error + }{result1, result2} +} + +func (fake *Instance) GetCreationTimestamp() v1.Time { + fake.getCreationTimestampMutex.Lock() + ret, specificReturn := fake.getCreationTimestampReturnsOnCall[len(fake.getCreationTimestampArgsForCall)] + fake.getCreationTimestampArgsForCall = append(fake.getCreationTimestampArgsForCall, struct { + }{}) + stub := fake.GetCreationTimestampStub + fakeReturns := fake.getCreationTimestampReturns + fake.recordInvocation("GetCreationTimestamp", []interface{}{}) + fake.getCreationTimestampMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetCreationTimestampCallCount() int { + fake.getCreationTimestampMutex.RLock() + defer fake.getCreationTimestampMutex.RUnlock() + return len(fake.getCreationTimestampArgsForCall) +} + +func (fake *Instance) GetCreationTimestampCalls(stub func() v1.Time) { + fake.getCreationTimestampMutex.Lock() + defer fake.getCreationTimestampMutex.Unlock() + fake.GetCreationTimestampStub = stub +} + +func (fake *Instance) GetCreationTimestampReturns(result1 v1.Time) { + fake.getCreationTimestampMutex.Lock() + defer fake.getCreationTimestampMutex.Unlock() + fake.GetCreationTimestampStub = nil + fake.getCreationTimestampReturns = struct { + result1 v1.Time + }{result1} +} + +func (fake *Instance) GetCreationTimestampReturnsOnCall(i int, result1 v1.Time) { + fake.getCreationTimestampMutex.Lock() + defer fake.getCreationTimestampMutex.Unlock() + fake.GetCreationTimestampStub = nil + if fake.getCreationTimestampReturnsOnCall == nil { + fake.getCreationTimestampReturnsOnCall = make(map[int]struct { + result1 v1.Time + }) + } + fake.getCreationTimestampReturnsOnCall[i] = struct { + result1 v1.Time + }{result1} +} + +func (fake *Instance) GetDeletionGracePeriodSeconds() *int64 { + fake.getDeletionGracePeriodSecondsMutex.Lock() + ret, specificReturn := fake.getDeletionGracePeriodSecondsReturnsOnCall[len(fake.getDeletionGracePeriodSecondsArgsForCall)] + fake.getDeletionGracePeriodSecondsArgsForCall = append(fake.getDeletionGracePeriodSecondsArgsForCall, struct { + }{}) + stub := fake.GetDeletionGracePeriodSecondsStub + fakeReturns := fake.getDeletionGracePeriodSecondsReturns + fake.recordInvocation("GetDeletionGracePeriodSeconds", []interface{}{}) + fake.getDeletionGracePeriodSecondsMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetDeletionGracePeriodSecondsCallCount() int { + fake.getDeletionGracePeriodSecondsMutex.RLock() + defer fake.getDeletionGracePeriodSecondsMutex.RUnlock() + return len(fake.getDeletionGracePeriodSecondsArgsForCall) +} + +func (fake *Instance) GetDeletionGracePeriodSecondsCalls(stub func() *int64) { + fake.getDeletionGracePeriodSecondsMutex.Lock() + defer fake.getDeletionGracePeriodSecondsMutex.Unlock() + fake.GetDeletionGracePeriodSecondsStub = stub +} + +func (fake *Instance) GetDeletionGracePeriodSecondsReturns(result1 *int64) { + fake.getDeletionGracePeriodSecondsMutex.Lock() + defer fake.getDeletionGracePeriodSecondsMutex.Unlock() + fake.GetDeletionGracePeriodSecondsStub = nil + fake.getDeletionGracePeriodSecondsReturns = struct { + result1 
*int64 + }{result1} +} + +func (fake *Instance) GetDeletionGracePeriodSecondsReturnsOnCall(i int, result1 *int64) { + fake.getDeletionGracePeriodSecondsMutex.Lock() + defer fake.getDeletionGracePeriodSecondsMutex.Unlock() + fake.GetDeletionGracePeriodSecondsStub = nil + if fake.getDeletionGracePeriodSecondsReturnsOnCall == nil { + fake.getDeletionGracePeriodSecondsReturnsOnCall = make(map[int]struct { + result1 *int64 + }) + } + fake.getDeletionGracePeriodSecondsReturnsOnCall[i] = struct { + result1 *int64 + }{result1} +} + +func (fake *Instance) GetDeletionTimestamp() *v1.Time { + fake.getDeletionTimestampMutex.Lock() + ret, specificReturn := fake.getDeletionTimestampReturnsOnCall[len(fake.getDeletionTimestampArgsForCall)] + fake.getDeletionTimestampArgsForCall = append(fake.getDeletionTimestampArgsForCall, struct { + }{}) + stub := fake.GetDeletionTimestampStub + fakeReturns := fake.getDeletionTimestampReturns + fake.recordInvocation("GetDeletionTimestamp", []interface{}{}) + fake.getDeletionTimestampMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetDeletionTimestampCallCount() int { + fake.getDeletionTimestampMutex.RLock() + defer fake.getDeletionTimestampMutex.RUnlock() + return len(fake.getDeletionTimestampArgsForCall) +} + +func (fake *Instance) GetDeletionTimestampCalls(stub func() *v1.Time) { + fake.getDeletionTimestampMutex.Lock() + defer fake.getDeletionTimestampMutex.Unlock() + fake.GetDeletionTimestampStub = stub +} + +func (fake *Instance) GetDeletionTimestampReturns(result1 *v1.Time) { + fake.getDeletionTimestampMutex.Lock() + defer fake.getDeletionTimestampMutex.Unlock() + fake.GetDeletionTimestampStub = nil + fake.getDeletionTimestampReturns = struct { + result1 *v1.Time + }{result1} +} + +func (fake *Instance) GetDeletionTimestampReturnsOnCall(i int, result1 *v1.Time) { + fake.getDeletionTimestampMutex.Lock() + defer fake.getDeletionTimestampMutex.Unlock() + fake.GetDeletionTimestampStub = nil + if fake.getDeletionTimestampReturnsOnCall == nil { + fake.getDeletionTimestampReturnsOnCall = make(map[int]struct { + result1 *v1.Time + }) + } + fake.getDeletionTimestampReturnsOnCall[i] = struct { + result1 *v1.Time + }{result1} +} + +func (fake *Instance) GetFinalizers() []string { + fake.getFinalizersMutex.Lock() + ret, specificReturn := fake.getFinalizersReturnsOnCall[len(fake.getFinalizersArgsForCall)] + fake.getFinalizersArgsForCall = append(fake.getFinalizersArgsForCall, struct { + }{}) + stub := fake.GetFinalizersStub + fakeReturns := fake.getFinalizersReturns + fake.recordInvocation("GetFinalizers", []interface{}{}) + fake.getFinalizersMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetFinalizersCallCount() int { + fake.getFinalizersMutex.RLock() + defer fake.getFinalizersMutex.RUnlock() + return len(fake.getFinalizersArgsForCall) +} + +func (fake *Instance) GetFinalizersCalls(stub func() []string) { + fake.getFinalizersMutex.Lock() + defer fake.getFinalizersMutex.Unlock() + fake.GetFinalizersStub = stub +} + +func (fake *Instance) GetFinalizersReturns(result1 []string) { + fake.getFinalizersMutex.Lock() + defer fake.getFinalizersMutex.Unlock() + fake.GetFinalizersStub = nil + fake.getFinalizersReturns = struct { + result1 []string + }{result1} +} + +func (fake *Instance) GetFinalizersReturnsOnCall(i int, result1 []string) { + 
fake.getFinalizersMutex.Lock() + defer fake.getFinalizersMutex.Unlock() + fake.GetFinalizersStub = nil + if fake.getFinalizersReturnsOnCall == nil { + fake.getFinalizersReturnsOnCall = make(map[int]struct { + result1 []string + }) + } + fake.getFinalizersReturnsOnCall[i] = struct { + result1 []string + }{result1} +} + +func (fake *Instance) GetGenerateName() string { + fake.getGenerateNameMutex.Lock() + ret, specificReturn := fake.getGenerateNameReturnsOnCall[len(fake.getGenerateNameArgsForCall)] + fake.getGenerateNameArgsForCall = append(fake.getGenerateNameArgsForCall, struct { + }{}) + stub := fake.GetGenerateNameStub + fakeReturns := fake.getGenerateNameReturns + fake.recordInvocation("GetGenerateName", []interface{}{}) + fake.getGenerateNameMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetGenerateNameCallCount() int { + fake.getGenerateNameMutex.RLock() + defer fake.getGenerateNameMutex.RUnlock() + return len(fake.getGenerateNameArgsForCall) +} + +func (fake *Instance) GetGenerateNameCalls(stub func() string) { + fake.getGenerateNameMutex.Lock() + defer fake.getGenerateNameMutex.Unlock() + fake.GetGenerateNameStub = stub +} + +func (fake *Instance) GetGenerateNameReturns(result1 string) { + fake.getGenerateNameMutex.Lock() + defer fake.getGenerateNameMutex.Unlock() + fake.GetGenerateNameStub = nil + fake.getGenerateNameReturns = struct { + result1 string + }{result1} +} + +func (fake *Instance) GetGenerateNameReturnsOnCall(i int, result1 string) { + fake.getGenerateNameMutex.Lock() + defer fake.getGenerateNameMutex.Unlock() + fake.GetGenerateNameStub = nil + if fake.getGenerateNameReturnsOnCall == nil { + fake.getGenerateNameReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getGenerateNameReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *Instance) GetGeneration() int64 { + fake.getGenerationMutex.Lock() + ret, specificReturn := fake.getGenerationReturnsOnCall[len(fake.getGenerationArgsForCall)] + fake.getGenerationArgsForCall = append(fake.getGenerationArgsForCall, struct { + }{}) + stub := fake.GetGenerationStub + fakeReturns := fake.getGenerationReturns + fake.recordInvocation("GetGeneration", []interface{}{}) + fake.getGenerationMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetGenerationCallCount() int { + fake.getGenerationMutex.RLock() + defer fake.getGenerationMutex.RUnlock() + return len(fake.getGenerationArgsForCall) +} + +func (fake *Instance) GetGenerationCalls(stub func() int64) { + fake.getGenerationMutex.Lock() + defer fake.getGenerationMutex.Unlock() + fake.GetGenerationStub = stub +} + +func (fake *Instance) GetGenerationReturns(result1 int64) { + fake.getGenerationMutex.Lock() + defer fake.getGenerationMutex.Unlock() + fake.GetGenerationStub = nil + fake.getGenerationReturns = struct { + result1 int64 + }{result1} +} + +func (fake *Instance) GetGenerationReturnsOnCall(i int, result1 int64) { + fake.getGenerationMutex.Lock() + defer fake.getGenerationMutex.Unlock() + fake.GetGenerationStub = nil + if fake.getGenerationReturnsOnCall == nil { + fake.getGenerationReturnsOnCall = make(map[int]struct { + result1 int64 + }) + } + fake.getGenerationReturnsOnCall[i] = struct { + result1 int64 + }{result1} +} + +func (fake *Instance) GetLabels() map[string]string { + fake.getLabelsMutex.Lock() + ret, 
specificReturn := fake.getLabelsReturnsOnCall[len(fake.getLabelsArgsForCall)] + fake.getLabelsArgsForCall = append(fake.getLabelsArgsForCall, struct { + }{}) + stub := fake.GetLabelsStub + fakeReturns := fake.getLabelsReturns + fake.recordInvocation("GetLabels", []interface{}{}) + fake.getLabelsMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetLabelsCallCount() int { + fake.getLabelsMutex.RLock() + defer fake.getLabelsMutex.RUnlock() + return len(fake.getLabelsArgsForCall) +} + +func (fake *Instance) GetLabelsCalls(stub func() map[string]string) { + fake.getLabelsMutex.Lock() + defer fake.getLabelsMutex.Unlock() + fake.GetLabelsStub = stub +} + +func (fake *Instance) GetLabelsReturns(result1 map[string]string) { + fake.getLabelsMutex.Lock() + defer fake.getLabelsMutex.Unlock() + fake.GetLabelsStub = nil + fake.getLabelsReturns = struct { + result1 map[string]string + }{result1} +} + +func (fake *Instance) GetLabelsReturnsOnCall(i int, result1 map[string]string) { + fake.getLabelsMutex.Lock() + defer fake.getLabelsMutex.Unlock() + fake.GetLabelsStub = nil + if fake.getLabelsReturnsOnCall == nil { + fake.getLabelsReturnsOnCall = make(map[int]struct { + result1 map[string]string + }) + } + fake.getLabelsReturnsOnCall[i] = struct { + result1 map[string]string + }{result1} +} + +func (fake *Instance) GetManagedFields() []v1.ManagedFieldsEntry { + fake.getManagedFieldsMutex.Lock() + ret, specificReturn := fake.getManagedFieldsReturnsOnCall[len(fake.getManagedFieldsArgsForCall)] + fake.getManagedFieldsArgsForCall = append(fake.getManagedFieldsArgsForCall, struct { + }{}) + stub := fake.GetManagedFieldsStub + fakeReturns := fake.getManagedFieldsReturns + fake.recordInvocation("GetManagedFields", []interface{}{}) + fake.getManagedFieldsMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetManagedFieldsCallCount() int { + fake.getManagedFieldsMutex.RLock() + defer fake.getManagedFieldsMutex.RUnlock() + return len(fake.getManagedFieldsArgsForCall) +} + +func (fake *Instance) GetManagedFieldsCalls(stub func() []v1.ManagedFieldsEntry) { + fake.getManagedFieldsMutex.Lock() + defer fake.getManagedFieldsMutex.Unlock() + fake.GetManagedFieldsStub = stub +} + +func (fake *Instance) GetManagedFieldsReturns(result1 []v1.ManagedFieldsEntry) { + fake.getManagedFieldsMutex.Lock() + defer fake.getManagedFieldsMutex.Unlock() + fake.GetManagedFieldsStub = nil + fake.getManagedFieldsReturns = struct { + result1 []v1.ManagedFieldsEntry + }{result1} +} + +func (fake *Instance) GetManagedFieldsReturnsOnCall(i int, result1 []v1.ManagedFieldsEntry) { + fake.getManagedFieldsMutex.Lock() + defer fake.getManagedFieldsMutex.Unlock() + fake.GetManagedFieldsStub = nil + if fake.getManagedFieldsReturnsOnCall == nil { + fake.getManagedFieldsReturnsOnCall = make(map[int]struct { + result1 []v1.ManagedFieldsEntry + }) + } + fake.getManagedFieldsReturnsOnCall[i] = struct { + result1 []v1.ManagedFieldsEntry + }{result1} +} + +func (fake *Instance) GetName() string { + fake.getNameMutex.Lock() + ret, specificReturn := fake.getNameReturnsOnCall[len(fake.getNameArgsForCall)] + fake.getNameArgsForCall = append(fake.getNameArgsForCall, struct { + }{}) + stub := fake.GetNameStub + fakeReturns := fake.getNameReturns + fake.recordInvocation("GetName", []interface{}{}) + fake.getNameMutex.Unlock() + if stub != nil { + return 
stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetNameCallCount() int { + fake.getNameMutex.RLock() + defer fake.getNameMutex.RUnlock() + return len(fake.getNameArgsForCall) +} + +func (fake *Instance) GetNameCalls(stub func() string) { + fake.getNameMutex.Lock() + defer fake.getNameMutex.Unlock() + fake.GetNameStub = stub +} + +func (fake *Instance) GetNameReturns(result1 string) { + fake.getNameMutex.Lock() + defer fake.getNameMutex.Unlock() + fake.GetNameStub = nil + fake.getNameReturns = struct { + result1 string + }{result1} +} + +func (fake *Instance) GetNameReturnsOnCall(i int, result1 string) { + fake.getNameMutex.Lock() + defer fake.getNameMutex.Unlock() + fake.GetNameStub = nil + if fake.getNameReturnsOnCall == nil { + fake.getNameReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getNameReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *Instance) GetNamespace() string { + fake.getNamespaceMutex.Lock() + ret, specificReturn := fake.getNamespaceReturnsOnCall[len(fake.getNamespaceArgsForCall)] + fake.getNamespaceArgsForCall = append(fake.getNamespaceArgsForCall, struct { + }{}) + stub := fake.GetNamespaceStub + fakeReturns := fake.getNamespaceReturns + fake.recordInvocation("GetNamespace", []interface{}{}) + fake.getNamespaceMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetNamespaceCallCount() int { + fake.getNamespaceMutex.RLock() + defer fake.getNamespaceMutex.RUnlock() + return len(fake.getNamespaceArgsForCall) +} + +func (fake *Instance) GetNamespaceCalls(stub func() string) { + fake.getNamespaceMutex.Lock() + defer fake.getNamespaceMutex.Unlock() + fake.GetNamespaceStub = stub +} + +func (fake *Instance) GetNamespaceReturns(result1 string) { + fake.getNamespaceMutex.Lock() + defer fake.getNamespaceMutex.Unlock() + fake.GetNamespaceStub = nil + fake.getNamespaceReturns = struct { + result1 string + }{result1} +} + +func (fake *Instance) GetNamespaceReturnsOnCall(i int, result1 string) { + fake.getNamespaceMutex.Lock() + defer fake.getNamespaceMutex.Unlock() + fake.GetNamespaceStub = nil + if fake.getNamespaceReturnsOnCall == nil { + fake.getNamespaceReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getNamespaceReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *Instance) GetObjectKind() schema.ObjectKind { + fake.getObjectKindMutex.Lock() + ret, specificReturn := fake.getObjectKindReturnsOnCall[len(fake.getObjectKindArgsForCall)] + fake.getObjectKindArgsForCall = append(fake.getObjectKindArgsForCall, struct { + }{}) + stub := fake.GetObjectKindStub + fakeReturns := fake.getObjectKindReturns + fake.recordInvocation("GetObjectKind", []interface{}{}) + fake.getObjectKindMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetObjectKindCallCount() int { + fake.getObjectKindMutex.RLock() + defer fake.getObjectKindMutex.RUnlock() + return len(fake.getObjectKindArgsForCall) +} + +func (fake *Instance) GetObjectKindCalls(stub func() schema.ObjectKind) { + fake.getObjectKindMutex.Lock() + defer fake.getObjectKindMutex.Unlock() + fake.GetObjectKindStub = stub +} + +func (fake *Instance) GetObjectKindReturns(result1 schema.ObjectKind) { + fake.getObjectKindMutex.Lock() + defer fake.getObjectKindMutex.Unlock() + 
fake.GetObjectKindStub = nil + fake.getObjectKindReturns = struct { + result1 schema.ObjectKind + }{result1} +} + +func (fake *Instance) GetObjectKindReturnsOnCall(i int, result1 schema.ObjectKind) { + fake.getObjectKindMutex.Lock() + defer fake.getObjectKindMutex.Unlock() + fake.GetObjectKindStub = nil + if fake.getObjectKindReturnsOnCall == nil { + fake.getObjectKindReturnsOnCall = make(map[int]struct { + result1 schema.ObjectKind + }) + } + fake.getObjectKindReturnsOnCall[i] = struct { + result1 schema.ObjectKind + }{result1} +} + +func (fake *Instance) GetOwnerReferences() []v1.OwnerReference { + fake.getOwnerReferencesMutex.Lock() + ret, specificReturn := fake.getOwnerReferencesReturnsOnCall[len(fake.getOwnerReferencesArgsForCall)] + fake.getOwnerReferencesArgsForCall = append(fake.getOwnerReferencesArgsForCall, struct { + }{}) + stub := fake.GetOwnerReferencesStub + fakeReturns := fake.getOwnerReferencesReturns + fake.recordInvocation("GetOwnerReferences", []interface{}{}) + fake.getOwnerReferencesMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetOwnerReferencesCallCount() int { + fake.getOwnerReferencesMutex.RLock() + defer fake.getOwnerReferencesMutex.RUnlock() + return len(fake.getOwnerReferencesArgsForCall) +} + +func (fake *Instance) GetOwnerReferencesCalls(stub func() []v1.OwnerReference) { + fake.getOwnerReferencesMutex.Lock() + defer fake.getOwnerReferencesMutex.Unlock() + fake.GetOwnerReferencesStub = stub +} + +func (fake *Instance) GetOwnerReferencesReturns(result1 []v1.OwnerReference) { + fake.getOwnerReferencesMutex.Lock() + defer fake.getOwnerReferencesMutex.Unlock() + fake.GetOwnerReferencesStub = nil + fake.getOwnerReferencesReturns = struct { + result1 []v1.OwnerReference + }{result1} +} + +func (fake *Instance) GetOwnerReferencesReturnsOnCall(i int, result1 []v1.OwnerReference) { + fake.getOwnerReferencesMutex.Lock() + defer fake.getOwnerReferencesMutex.Unlock() + fake.GetOwnerReferencesStub = nil + if fake.getOwnerReferencesReturnsOnCall == nil { + fake.getOwnerReferencesReturnsOnCall = make(map[int]struct { + result1 []v1.OwnerReference + }) + } + fake.getOwnerReferencesReturnsOnCall[i] = struct { + result1 []v1.OwnerReference + }{result1} +} + +func (fake *Instance) GetResourceVersion() string { + fake.getResourceVersionMutex.Lock() + ret, specificReturn := fake.getResourceVersionReturnsOnCall[len(fake.getResourceVersionArgsForCall)] + fake.getResourceVersionArgsForCall = append(fake.getResourceVersionArgsForCall, struct { + }{}) + stub := fake.GetResourceVersionStub + fakeReturns := fake.getResourceVersionReturns + fake.recordInvocation("GetResourceVersion", []interface{}{}) + fake.getResourceVersionMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetResourceVersionCallCount() int { + fake.getResourceVersionMutex.RLock() + defer fake.getResourceVersionMutex.RUnlock() + return len(fake.getResourceVersionArgsForCall) +} + +func (fake *Instance) GetResourceVersionCalls(stub func() string) { + fake.getResourceVersionMutex.Lock() + defer fake.getResourceVersionMutex.Unlock() + fake.GetResourceVersionStub = stub +} + +func (fake *Instance) GetResourceVersionReturns(result1 string) { + fake.getResourceVersionMutex.Lock() + defer fake.getResourceVersionMutex.Unlock() + fake.GetResourceVersionStub = nil + fake.getResourceVersionReturns = struct { + 
result1 string + }{result1} +} + +func (fake *Instance) GetResourceVersionReturnsOnCall(i int, result1 string) { + fake.getResourceVersionMutex.Lock() + defer fake.getResourceVersionMutex.Unlock() + fake.GetResourceVersionStub = nil + if fake.getResourceVersionReturnsOnCall == nil { + fake.getResourceVersionReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getResourceVersionReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *Instance) GetSelfLink() string { + fake.getSelfLinkMutex.Lock() + ret, specificReturn := fake.getSelfLinkReturnsOnCall[len(fake.getSelfLinkArgsForCall)] + fake.getSelfLinkArgsForCall = append(fake.getSelfLinkArgsForCall, struct { + }{}) + stub := fake.GetSelfLinkStub + fakeReturns := fake.getSelfLinkReturns + fake.recordInvocation("GetSelfLink", []interface{}{}) + fake.getSelfLinkMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetSelfLinkCallCount() int { + fake.getSelfLinkMutex.RLock() + defer fake.getSelfLinkMutex.RUnlock() + return len(fake.getSelfLinkArgsForCall) +} + +func (fake *Instance) GetSelfLinkCalls(stub func() string) { + fake.getSelfLinkMutex.Lock() + defer fake.getSelfLinkMutex.Unlock() + fake.GetSelfLinkStub = stub +} + +func (fake *Instance) GetSelfLinkReturns(result1 string) { + fake.getSelfLinkMutex.Lock() + defer fake.getSelfLinkMutex.Unlock() + fake.GetSelfLinkStub = nil + fake.getSelfLinkReturns = struct { + result1 string + }{result1} +} + +func (fake *Instance) GetSelfLinkReturnsOnCall(i int, result1 string) { + fake.getSelfLinkMutex.Lock() + defer fake.getSelfLinkMutex.Unlock() + fake.GetSelfLinkStub = nil + if fake.getSelfLinkReturnsOnCall == nil { + fake.getSelfLinkReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getSelfLinkReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *Instance) GetUID() types.UID { + fake.getUIDMutex.Lock() + ret, specificReturn := fake.getUIDReturnsOnCall[len(fake.getUIDArgsForCall)] + fake.getUIDArgsForCall = append(fake.getUIDArgsForCall, struct { + }{}) + stub := fake.GetUIDStub + fakeReturns := fake.getUIDReturns + fake.recordInvocation("GetUID", []interface{}{}) + fake.getUIDMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetUIDCallCount() int { + fake.getUIDMutex.RLock() + defer fake.getUIDMutex.RUnlock() + return len(fake.getUIDArgsForCall) +} + +func (fake *Instance) GetUIDCalls(stub func() types.UID) { + fake.getUIDMutex.Lock() + defer fake.getUIDMutex.Unlock() + fake.GetUIDStub = stub +} + +func (fake *Instance) GetUIDReturns(result1 types.UID) { + fake.getUIDMutex.Lock() + defer fake.getUIDMutex.Unlock() + fake.GetUIDStub = nil + fake.getUIDReturns = struct { + result1 types.UID + }{result1} +} + +func (fake *Instance) GetUIDReturnsOnCall(i int, result1 types.UID) { + fake.getUIDMutex.Lock() + defer fake.getUIDMutex.Unlock() + fake.GetUIDStub = nil + if fake.getUIDReturnsOnCall == nil { + fake.getUIDReturnsOnCall = make(map[int]struct { + result1 types.UID + }) + } + fake.getUIDReturnsOnCall[i] = struct { + result1 types.UID + }{result1} +} + +func (fake *Instance) IsHSMEnabled() bool { + fake.isHSMEnabledMutex.Lock() + ret, specificReturn := fake.isHSMEnabledReturnsOnCall[len(fake.isHSMEnabledArgsForCall)] + fake.isHSMEnabledArgsForCall = append(fake.isHSMEnabledArgsForCall, struct { + }{}) + stub := 
fake.IsHSMEnabledStub + fakeReturns := fake.isHSMEnabledReturns + fake.recordInvocation("IsHSMEnabled", []interface{}{}) + fake.isHSMEnabledMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) IsHSMEnabledCallCount() int { + fake.isHSMEnabledMutex.RLock() + defer fake.isHSMEnabledMutex.RUnlock() + return len(fake.isHSMEnabledArgsForCall) +} + +func (fake *Instance) IsHSMEnabledCalls(stub func() bool) { + fake.isHSMEnabledMutex.Lock() + defer fake.isHSMEnabledMutex.Unlock() + fake.IsHSMEnabledStub = stub +} + +func (fake *Instance) IsHSMEnabledReturns(result1 bool) { + fake.isHSMEnabledMutex.Lock() + defer fake.isHSMEnabledMutex.Unlock() + fake.IsHSMEnabledStub = nil + fake.isHSMEnabledReturns = struct { + result1 bool + }{result1} +} + +func (fake *Instance) IsHSMEnabledReturnsOnCall(i int, result1 bool) { + fake.isHSMEnabledMutex.Lock() + defer fake.isHSMEnabledMutex.Unlock() + fake.IsHSMEnabledStub = nil + if fake.isHSMEnabledReturnsOnCall == nil { + fake.isHSMEnabledReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.isHSMEnabledReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Instance) SetAnnotations(arg1 map[string]string) { + fake.setAnnotationsMutex.Lock() + fake.setAnnotationsArgsForCall = append(fake.setAnnotationsArgsForCall, struct { + arg1 map[string]string + }{arg1}) + stub := fake.SetAnnotationsStub + fake.recordInvocation("SetAnnotations", []interface{}{arg1}) + fake.setAnnotationsMutex.Unlock() + if stub != nil { + fake.SetAnnotationsStub(arg1) + } +} + +func (fake *Instance) SetAnnotationsCallCount() int { + fake.setAnnotationsMutex.RLock() + defer fake.setAnnotationsMutex.RUnlock() + return len(fake.setAnnotationsArgsForCall) +} + +func (fake *Instance) SetAnnotationsCalls(stub func(map[string]string)) { + fake.setAnnotationsMutex.Lock() + defer fake.setAnnotationsMutex.Unlock() + fake.SetAnnotationsStub = stub +} + +func (fake *Instance) SetAnnotationsArgsForCall(i int) map[string]string { + fake.setAnnotationsMutex.RLock() + defer fake.setAnnotationsMutex.RUnlock() + argsForCall := fake.setAnnotationsArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Instance) SetClusterName(arg1 string) { + fake.setClusterNameMutex.Lock() + fake.setClusterNameArgsForCall = append(fake.setClusterNameArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetClusterNameStub + fake.recordInvocation("SetClusterName", []interface{}{arg1}) + fake.setClusterNameMutex.Unlock() + if stub != nil { + fake.SetClusterNameStub(arg1) + } +} + +func (fake *Instance) SetClusterNameCallCount() int { + fake.setClusterNameMutex.RLock() + defer fake.setClusterNameMutex.RUnlock() + return len(fake.setClusterNameArgsForCall) +} + +func (fake *Instance) SetClusterNameCalls(stub func(string)) { + fake.setClusterNameMutex.Lock() + defer fake.setClusterNameMutex.Unlock() + fake.SetClusterNameStub = stub +} + +func (fake *Instance) SetClusterNameArgsForCall(i int) string { + fake.setClusterNameMutex.RLock() + defer fake.setClusterNameMutex.RUnlock() + argsForCall := fake.setClusterNameArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Instance) SetCreationTimestamp(arg1 v1.Time) { + fake.setCreationTimestampMutex.Lock() + fake.setCreationTimestampArgsForCall = append(fake.setCreationTimestampArgsForCall, struct { + arg1 v1.Time + }{arg1}) + stub := fake.SetCreationTimestampStub + fake.recordInvocation("SetCreationTimestamp", []interface{}{arg1}) + 
fake.setCreationTimestampMutex.Unlock() + if stub != nil { + fake.SetCreationTimestampStub(arg1) + } +} + +func (fake *Instance) SetCreationTimestampCallCount() int { + fake.setCreationTimestampMutex.RLock() + defer fake.setCreationTimestampMutex.RUnlock() + return len(fake.setCreationTimestampArgsForCall) +} + +func (fake *Instance) SetCreationTimestampCalls(stub func(v1.Time)) { + fake.setCreationTimestampMutex.Lock() + defer fake.setCreationTimestampMutex.Unlock() + fake.SetCreationTimestampStub = stub +} + +func (fake *Instance) SetCreationTimestampArgsForCall(i int) v1.Time { + fake.setCreationTimestampMutex.RLock() + defer fake.setCreationTimestampMutex.RUnlock() + argsForCall := fake.setCreationTimestampArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Instance) SetDeletionGracePeriodSeconds(arg1 *int64) { + fake.setDeletionGracePeriodSecondsMutex.Lock() + fake.setDeletionGracePeriodSecondsArgsForCall = append(fake.setDeletionGracePeriodSecondsArgsForCall, struct { + arg1 *int64 + }{arg1}) + stub := fake.SetDeletionGracePeriodSecondsStub + fake.recordInvocation("SetDeletionGracePeriodSeconds", []interface{}{arg1}) + fake.setDeletionGracePeriodSecondsMutex.Unlock() + if stub != nil { + fake.SetDeletionGracePeriodSecondsStub(arg1) + } +} + +func (fake *Instance) SetDeletionGracePeriodSecondsCallCount() int { + fake.setDeletionGracePeriodSecondsMutex.RLock() + defer fake.setDeletionGracePeriodSecondsMutex.RUnlock() + return len(fake.setDeletionGracePeriodSecondsArgsForCall) +} + +func (fake *Instance) SetDeletionGracePeriodSecondsCalls(stub func(*int64)) { + fake.setDeletionGracePeriodSecondsMutex.Lock() + defer fake.setDeletionGracePeriodSecondsMutex.Unlock() + fake.SetDeletionGracePeriodSecondsStub = stub +} + +func (fake *Instance) SetDeletionGracePeriodSecondsArgsForCall(i int) *int64 { + fake.setDeletionGracePeriodSecondsMutex.RLock() + defer fake.setDeletionGracePeriodSecondsMutex.RUnlock() + argsForCall := fake.setDeletionGracePeriodSecondsArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Instance) SetDeletionTimestamp(arg1 *v1.Time) { + fake.setDeletionTimestampMutex.Lock() + fake.setDeletionTimestampArgsForCall = append(fake.setDeletionTimestampArgsForCall, struct { + arg1 *v1.Time + }{arg1}) + stub := fake.SetDeletionTimestampStub + fake.recordInvocation("SetDeletionTimestamp", []interface{}{arg1}) + fake.setDeletionTimestampMutex.Unlock() + if stub != nil { + fake.SetDeletionTimestampStub(arg1) + } +} + +func (fake *Instance) SetDeletionTimestampCallCount() int { + fake.setDeletionTimestampMutex.RLock() + defer fake.setDeletionTimestampMutex.RUnlock() + return len(fake.setDeletionTimestampArgsForCall) +} + +func (fake *Instance) SetDeletionTimestampCalls(stub func(*v1.Time)) { + fake.setDeletionTimestampMutex.Lock() + defer fake.setDeletionTimestampMutex.Unlock() + fake.SetDeletionTimestampStub = stub +} + +func (fake *Instance) SetDeletionTimestampArgsForCall(i int) *v1.Time { + fake.setDeletionTimestampMutex.RLock() + defer fake.setDeletionTimestampMutex.RUnlock() + argsForCall := fake.setDeletionTimestampArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Instance) SetFinalizers(arg1 []string) { + var arg1Copy []string + if arg1 != nil { + arg1Copy = make([]string, len(arg1)) + copy(arg1Copy, arg1) + } + fake.setFinalizersMutex.Lock() + fake.setFinalizersArgsForCall = append(fake.setFinalizersArgsForCall, struct { + arg1 []string + }{arg1Copy}) + stub := fake.SetFinalizersStub + fake.recordInvocation("SetFinalizers", []interface{}{arg1Copy}) + 
fake.setFinalizersMutex.Unlock() + if stub != nil { + fake.SetFinalizersStub(arg1) + } +} + +func (fake *Instance) SetFinalizersCallCount() int { + fake.setFinalizersMutex.RLock() + defer fake.setFinalizersMutex.RUnlock() + return len(fake.setFinalizersArgsForCall) +} + +func (fake *Instance) SetFinalizersCalls(stub func([]string)) { + fake.setFinalizersMutex.Lock() + defer fake.setFinalizersMutex.Unlock() + fake.SetFinalizersStub = stub +} + +func (fake *Instance) SetFinalizersArgsForCall(i int) []string { + fake.setFinalizersMutex.RLock() + defer fake.setFinalizersMutex.RUnlock() + argsForCall := fake.setFinalizersArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Instance) SetGenerateName(arg1 string) { + fake.setGenerateNameMutex.Lock() + fake.setGenerateNameArgsForCall = append(fake.setGenerateNameArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetGenerateNameStub + fake.recordInvocation("SetGenerateName", []interface{}{arg1}) + fake.setGenerateNameMutex.Unlock() + if stub != nil { + fake.SetGenerateNameStub(arg1) + } +} + +func (fake *Instance) SetGenerateNameCallCount() int { + fake.setGenerateNameMutex.RLock() + defer fake.setGenerateNameMutex.RUnlock() + return len(fake.setGenerateNameArgsForCall) +} + +func (fake *Instance) SetGenerateNameCalls(stub func(string)) { + fake.setGenerateNameMutex.Lock() + defer fake.setGenerateNameMutex.Unlock() + fake.SetGenerateNameStub = stub +} + +func (fake *Instance) SetGenerateNameArgsForCall(i int) string { + fake.setGenerateNameMutex.RLock() + defer fake.setGenerateNameMutex.RUnlock() + argsForCall := fake.setGenerateNameArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Instance) SetGeneration(arg1 int64) { + fake.setGenerationMutex.Lock() + fake.setGenerationArgsForCall = append(fake.setGenerationArgsForCall, struct { + arg1 int64 + }{arg1}) + stub := fake.SetGenerationStub + fake.recordInvocation("SetGeneration", []interface{}{arg1}) + fake.setGenerationMutex.Unlock() + if stub != nil { + fake.SetGenerationStub(arg1) + } +} + +func (fake *Instance) SetGenerationCallCount() int { + fake.setGenerationMutex.RLock() + defer fake.setGenerationMutex.RUnlock() + return len(fake.setGenerationArgsForCall) +} + +func (fake *Instance) SetGenerationCalls(stub func(int64)) { + fake.setGenerationMutex.Lock() + defer fake.setGenerationMutex.Unlock() + fake.SetGenerationStub = stub +} + +func (fake *Instance) SetGenerationArgsForCall(i int) int64 { + fake.setGenerationMutex.RLock() + defer fake.setGenerationMutex.RUnlock() + argsForCall := fake.setGenerationArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Instance) SetLabels(arg1 map[string]string) { + fake.setLabelsMutex.Lock() + fake.setLabelsArgsForCall = append(fake.setLabelsArgsForCall, struct { + arg1 map[string]string + }{arg1}) + stub := fake.SetLabelsStub + fake.recordInvocation("SetLabels", []interface{}{arg1}) + fake.setLabelsMutex.Unlock() + if stub != nil { + fake.SetLabelsStub(arg1) + } +} + +func (fake *Instance) SetLabelsCallCount() int { + fake.setLabelsMutex.RLock() + defer fake.setLabelsMutex.RUnlock() + return len(fake.setLabelsArgsForCall) +} + +func (fake *Instance) SetLabelsCalls(stub func(map[string]string)) { + fake.setLabelsMutex.Lock() + defer fake.setLabelsMutex.Unlock() + fake.SetLabelsStub = stub +} + +func (fake *Instance) SetLabelsArgsForCall(i int) map[string]string { + fake.setLabelsMutex.RLock() + defer fake.setLabelsMutex.RUnlock() + argsForCall := fake.setLabelsArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Instance) 
SetManagedFields(arg1 []v1.ManagedFieldsEntry) { + var arg1Copy []v1.ManagedFieldsEntry + if arg1 != nil { + arg1Copy = make([]v1.ManagedFieldsEntry, len(arg1)) + copy(arg1Copy, arg1) + } + fake.setManagedFieldsMutex.Lock() + fake.setManagedFieldsArgsForCall = append(fake.setManagedFieldsArgsForCall, struct { + arg1 []v1.ManagedFieldsEntry + }{arg1Copy}) + stub := fake.SetManagedFieldsStub + fake.recordInvocation("SetManagedFields", []interface{}{arg1Copy}) + fake.setManagedFieldsMutex.Unlock() + if stub != nil { + fake.SetManagedFieldsStub(arg1) + } +} + +func (fake *Instance) SetManagedFieldsCallCount() int { + fake.setManagedFieldsMutex.RLock() + defer fake.setManagedFieldsMutex.RUnlock() + return len(fake.setManagedFieldsArgsForCall) +} + +func (fake *Instance) SetManagedFieldsCalls(stub func([]v1.ManagedFieldsEntry)) { + fake.setManagedFieldsMutex.Lock() + defer fake.setManagedFieldsMutex.Unlock() + fake.SetManagedFieldsStub = stub +} + +func (fake *Instance) SetManagedFieldsArgsForCall(i int) []v1.ManagedFieldsEntry { + fake.setManagedFieldsMutex.RLock() + defer fake.setManagedFieldsMutex.RUnlock() + argsForCall := fake.setManagedFieldsArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Instance) SetName(arg1 string) { + fake.setNameMutex.Lock() + fake.setNameArgsForCall = append(fake.setNameArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetNameStub + fake.recordInvocation("SetName", []interface{}{arg1}) + fake.setNameMutex.Unlock() + if stub != nil { + fake.SetNameStub(arg1) + } +} + +func (fake *Instance) SetNameCallCount() int { + fake.setNameMutex.RLock() + defer fake.setNameMutex.RUnlock() + return len(fake.setNameArgsForCall) +} + +func (fake *Instance) SetNameCalls(stub func(string)) { + fake.setNameMutex.Lock() + defer fake.setNameMutex.Unlock() + fake.SetNameStub = stub +} + +func (fake *Instance) SetNameArgsForCall(i int) string { + fake.setNameMutex.RLock() + defer fake.setNameMutex.RUnlock() + argsForCall := fake.setNameArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Instance) SetNamespace(arg1 string) { + fake.setNamespaceMutex.Lock() + fake.setNamespaceArgsForCall = append(fake.setNamespaceArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetNamespaceStub + fake.recordInvocation("SetNamespace", []interface{}{arg1}) + fake.setNamespaceMutex.Unlock() + if stub != nil { + fake.SetNamespaceStub(arg1) + } +} + +func (fake *Instance) SetNamespaceCallCount() int { + fake.setNamespaceMutex.RLock() + defer fake.setNamespaceMutex.RUnlock() + return len(fake.setNamespaceArgsForCall) +} + +func (fake *Instance) SetNamespaceCalls(stub func(string)) { + fake.setNamespaceMutex.Lock() + defer fake.setNamespaceMutex.Unlock() + fake.SetNamespaceStub = stub +} + +func (fake *Instance) SetNamespaceArgsForCall(i int) string { + fake.setNamespaceMutex.RLock() + defer fake.setNamespaceMutex.RUnlock() + argsForCall := fake.setNamespaceArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Instance) SetOwnerReferences(arg1 []v1.OwnerReference) { + var arg1Copy []v1.OwnerReference + if arg1 != nil { + arg1Copy = make([]v1.OwnerReference, len(arg1)) + copy(arg1Copy, arg1) + } + fake.setOwnerReferencesMutex.Lock() + fake.setOwnerReferencesArgsForCall = append(fake.setOwnerReferencesArgsForCall, struct { + arg1 []v1.OwnerReference + }{arg1Copy}) + stub := fake.SetOwnerReferencesStub + fake.recordInvocation("SetOwnerReferences", []interface{}{arg1Copy}) + fake.setOwnerReferencesMutex.Unlock() + if stub != nil { + fake.SetOwnerReferencesStub(arg1) + } 
+} + +func (fake *Instance) SetOwnerReferencesCallCount() int { + fake.setOwnerReferencesMutex.RLock() + defer fake.setOwnerReferencesMutex.RUnlock() + return len(fake.setOwnerReferencesArgsForCall) +} + +func (fake *Instance) SetOwnerReferencesCalls(stub func([]v1.OwnerReference)) { + fake.setOwnerReferencesMutex.Lock() + defer fake.setOwnerReferencesMutex.Unlock() + fake.SetOwnerReferencesStub = stub +} + +func (fake *Instance) SetOwnerReferencesArgsForCall(i int) []v1.OwnerReference { + fake.setOwnerReferencesMutex.RLock() + defer fake.setOwnerReferencesMutex.RUnlock() + argsForCall := fake.setOwnerReferencesArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Instance) SetResourceVersion(arg1 string) { + fake.setResourceVersionMutex.Lock() + fake.setResourceVersionArgsForCall = append(fake.setResourceVersionArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetResourceVersionStub + fake.recordInvocation("SetResourceVersion", []interface{}{arg1}) + fake.setResourceVersionMutex.Unlock() + if stub != nil { + fake.SetResourceVersionStub(arg1) + } +} + +func (fake *Instance) SetResourceVersionCallCount() int { + fake.setResourceVersionMutex.RLock() + defer fake.setResourceVersionMutex.RUnlock() + return len(fake.setResourceVersionArgsForCall) +} + +func (fake *Instance) SetResourceVersionCalls(stub func(string)) { + fake.setResourceVersionMutex.Lock() + defer fake.setResourceVersionMutex.Unlock() + fake.SetResourceVersionStub = stub +} + +func (fake *Instance) SetResourceVersionArgsForCall(i int) string { + fake.setResourceVersionMutex.RLock() + defer fake.setResourceVersionMutex.RUnlock() + argsForCall := fake.setResourceVersionArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Instance) SetSelfLink(arg1 string) { + fake.setSelfLinkMutex.Lock() + fake.setSelfLinkArgsForCall = append(fake.setSelfLinkArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetSelfLinkStub + fake.recordInvocation("SetSelfLink", []interface{}{arg1}) + fake.setSelfLinkMutex.Unlock() + if stub != nil { + fake.SetSelfLinkStub(arg1) + } +} + +func (fake *Instance) SetSelfLinkCallCount() int { + fake.setSelfLinkMutex.RLock() + defer fake.setSelfLinkMutex.RUnlock() + return len(fake.setSelfLinkArgsForCall) +} + +func (fake *Instance) SetSelfLinkCalls(stub func(string)) { + fake.setSelfLinkMutex.Lock() + defer fake.setSelfLinkMutex.Unlock() + fake.SetSelfLinkStub = stub +} + +func (fake *Instance) SetSelfLinkArgsForCall(i int) string { + fake.setSelfLinkMutex.RLock() + defer fake.setSelfLinkMutex.RUnlock() + argsForCall := fake.setSelfLinkArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Instance) SetUID(arg1 types.UID) { + fake.setUIDMutex.Lock() + fake.setUIDArgsForCall = append(fake.setUIDArgsForCall, struct { + arg1 types.UID + }{arg1}) + stub := fake.SetUIDStub + fake.recordInvocation("SetUID", []interface{}{arg1}) + fake.setUIDMutex.Unlock() + if stub != nil { + fake.SetUIDStub(arg1) + } +} + +func (fake *Instance) SetUIDCallCount() int { + fake.setUIDMutex.RLock() + defer fake.setUIDMutex.RUnlock() + return len(fake.setUIDArgsForCall) +} + +func (fake *Instance) SetUIDCalls(stub func(types.UID)) { + fake.setUIDMutex.Lock() + defer fake.setUIDMutex.Unlock() + fake.SetUIDStub = stub +} + +func (fake *Instance) SetUIDArgsForCall(i int) types.UID { + fake.setUIDMutex.RLock() + defer fake.setUIDMutex.RUnlock() + argsForCall := fake.setUIDArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Instance) UsingHSMProxy() bool { + fake.usingHSMProxyMutex.Lock() + ret, 
specificReturn := fake.usingHSMProxyReturnsOnCall[len(fake.usingHSMProxyArgsForCall)] + fake.usingHSMProxyArgsForCall = append(fake.usingHSMProxyArgsForCall, struct { + }{}) + stub := fake.UsingHSMProxyStub + fakeReturns := fake.usingHSMProxyReturns + fake.recordInvocation("UsingHSMProxy", []interface{}{}) + fake.usingHSMProxyMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) UsingHSMProxyCallCount() int { + fake.usingHSMProxyMutex.RLock() + defer fake.usingHSMProxyMutex.RUnlock() + return len(fake.usingHSMProxyArgsForCall) +} + +func (fake *Instance) UsingHSMProxyCalls(stub func() bool) { + fake.usingHSMProxyMutex.Lock() + defer fake.usingHSMProxyMutex.Unlock() + fake.UsingHSMProxyStub = stub +} + +func (fake *Instance) UsingHSMProxyReturns(result1 bool) { + fake.usingHSMProxyMutex.Lock() + defer fake.usingHSMProxyMutex.Unlock() + fake.UsingHSMProxyStub = nil + fake.usingHSMProxyReturns = struct { + result1 bool + }{result1} +} + +func (fake *Instance) UsingHSMProxyReturnsOnCall(i int, result1 bool) { + fake.usingHSMProxyMutex.Lock() + defer fake.usingHSMProxyMutex.Unlock() + fake.UsingHSMProxyStub = nil + if fake.usingHSMProxyReturnsOnCall == nil { + fake.usingHSMProxyReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.usingHSMProxyReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Instance) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.deepCopyObjectMutex.RLock() + defer fake.deepCopyObjectMutex.RUnlock() + fake.getAnnotationsMutex.RLock() + defer fake.getAnnotationsMutex.RUnlock() + fake.getClusterNameMutex.RLock() + defer fake.getClusterNameMutex.RUnlock() + fake.getConfigOverrideMutex.RLock() + defer fake.getConfigOverrideMutex.RUnlock() + fake.getCreationTimestampMutex.RLock() + defer fake.getCreationTimestampMutex.RUnlock() + fake.getDeletionGracePeriodSecondsMutex.RLock() + defer fake.getDeletionGracePeriodSecondsMutex.RUnlock() + fake.getDeletionTimestampMutex.RLock() + defer fake.getDeletionTimestampMutex.RUnlock() + fake.getFinalizersMutex.RLock() + defer fake.getFinalizersMutex.RUnlock() + fake.getGenerateNameMutex.RLock() + defer fake.getGenerateNameMutex.RUnlock() + fake.getGenerationMutex.RLock() + defer fake.getGenerationMutex.RUnlock() + fake.getLabelsMutex.RLock() + defer fake.getLabelsMutex.RUnlock() + fake.getManagedFieldsMutex.RLock() + defer fake.getManagedFieldsMutex.RUnlock() + fake.getNameMutex.RLock() + defer fake.getNameMutex.RUnlock() + fake.getNamespaceMutex.RLock() + defer fake.getNamespaceMutex.RUnlock() + fake.getObjectKindMutex.RLock() + defer fake.getObjectKindMutex.RUnlock() + fake.getOwnerReferencesMutex.RLock() + defer fake.getOwnerReferencesMutex.RUnlock() + fake.getResourceVersionMutex.RLock() + defer fake.getResourceVersionMutex.RUnlock() + fake.getSelfLinkMutex.RLock() + defer fake.getSelfLinkMutex.RUnlock() + fake.getUIDMutex.RLock() + defer fake.getUIDMutex.RUnlock() + fake.isHSMEnabledMutex.RLock() + defer fake.isHSMEnabledMutex.RUnlock() + fake.setAnnotationsMutex.RLock() + defer fake.setAnnotationsMutex.RUnlock() + fake.setClusterNameMutex.RLock() + defer fake.setClusterNameMutex.RUnlock() + fake.setCreationTimestampMutex.RLock() + defer fake.setCreationTimestampMutex.RUnlock() + fake.setDeletionGracePeriodSecondsMutex.RLock() + defer fake.setDeletionGracePeriodSecondsMutex.RUnlock() + fake.setDeletionTimestampMutex.RLock() 
+ defer fake.setDeletionTimestampMutex.RUnlock() + fake.setFinalizersMutex.RLock() + defer fake.setFinalizersMutex.RUnlock() + fake.setGenerateNameMutex.RLock() + defer fake.setGenerateNameMutex.RUnlock() + fake.setGenerationMutex.RLock() + defer fake.setGenerationMutex.RUnlock() + fake.setLabelsMutex.RLock() + defer fake.setLabelsMutex.RUnlock() + fake.setManagedFieldsMutex.RLock() + defer fake.setManagedFieldsMutex.RUnlock() + fake.setNameMutex.RLock() + defer fake.setNameMutex.RUnlock() + fake.setNamespaceMutex.RLock() + defer fake.setNamespaceMutex.RUnlock() + fake.setOwnerReferencesMutex.RLock() + defer fake.setOwnerReferencesMutex.RUnlock() + fake.setResourceVersionMutex.RLock() + defer fake.setResourceVersionMutex.RUnlock() + fake.setSelfLinkMutex.RLock() + defer fake.setSelfLinkMutex.RUnlock() + fake.setUIDMutex.RLock() + defer fake.setUIDMutex.RUnlock() + fake.usingHSMProxyMutex.RLock() + defer fake.usingHSMProxyMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *Instance) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ cryptogen.Instance = new(Instance) diff --git a/pkg/initializer/orderer/config/v1/config_suite_test.go b/pkg/initializer/orderer/config/v1/config_suite_test.go new file mode 100644 index 00000000..7969f062 --- /dev/null +++ b/pkg/initializer/orderer/config/v1/config_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestConfig(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Config Suite") +} diff --git a/pkg/initializer/orderer/config/v1/config_test.go b/pkg/initializer/orderer/config/v1/config_test.go new file mode 100644 index 00000000..078e27ff --- /dev/null +++ b/pkg/initializer/orderer/config/v1/config_test.go @@ -0,0 +1,200 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
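For reviewers skimming the counterfeiter-generated Instance fake that ends just above, a minimal usage sketch may help. It is illustrative only: the mocks import path is an assumption rather than something taken from this patch, and the fake is exercised directly instead of through the code under test.

    package mocks_test

    import (
    	"testing"

    	. "github.com/onsi/gomega"

    	// Assumed import path for the generated fake; adjust to the actual mocks package.
    	"github.com/IBM-Blockchain/fabric-operator/pkg/initializer/cryptogen/mocks"
    )

    func TestInstanceFakeSketch(t *testing.T) {
    	RegisterTestingT(t)

    	fake := &mocks.Instance{}

    	// Program canned return values for the getter stubs.
    	fake.GetNameReturns("orderer0")
    	fake.IsHSMEnabledReturns(true)

    	// The fake satisfies cryptogen.Instance, so code under test can consume it transparently.
    	Expect(fake.GetName()).To(Equal("orderer0"))
    	Expect(fake.IsHSMEnabled()).To(BeTrue())

    	// Every call is recorded, so arguments and call counts can be asserted afterwards.
    	fake.SetName("orderer1")
    	Expect(fake.SetNameCallCount()).To(Equal(1))
    	Expect(fake.SetNameArgsForCall(0)).To(Equal("orderer1"))
    }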
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1_test + +import ( + commonapi "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/orderer/v1" + config "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer/config/v1" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("Orderer configuration", func() { + Context("reading and writing orderer configuration file", func() { + BeforeEach(func() { + config := &config.Orderer{} + + err := config.WriteToFile("/tmp/orderer.yaml") + Expect(err).NotTo(HaveOccurred()) + }) + + It("creates orderer.yaml", func() { + Expect("/tmp/orderer.yaml").Should(BeAnExistingFile()) + }) + + It("read orderer.yaml", func() { + _, err := config.ReadOrdererFile("/tmp/orderer.yaml") + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("merges current configuration with overrides values", func() { + It("merges with defaults based on HSM proxy", func() { + orderer, err := config.ReadOrdererFile("../../../../../testdata/init/orderer/orderer.yaml") + Expect(err).NotTo(HaveOccurred()) + + newConfig := &config.Orderer{ + Orderer: v1.Orderer{ + General: v1.General{ + BCCSP: &commonapi.BCCSP{ + ProviderName: "PKCS11", + PKCS11: &commonapi.PKCS11Opts{ + Library: "library2", + Label: "label2", + Pin: "2222", + HashFamily: "SHA3", + SecLevel: 512, + FileKeyStore: &commonapi.FileKeyStoreOpts{ + KeyStorePath: "keystore3", + }, + }, + }, + }, + }, + } + + err = orderer.MergeWith(newConfig, true) + Expect(err).NotTo(HaveOccurred()) + Expect(orderer.General.BCCSP.PKCS11.Library).To(Equal("/usr/local/lib/libpkcs11-proxy.so")) + Expect(orderer.General.BCCSP.PKCS11.Label).To(Equal("label2")) + Expect(orderer.General.BCCSP.PKCS11.Pin).To(Equal("2222")) + Expect(orderer.General.BCCSP.PKCS11.HashFamily).To(Equal("SHA3")) + Expect(orderer.General.BCCSP.PKCS11.SecLevel).To(Equal(512)) + Expect(orderer.General.BCCSP.PKCS11.FileKeyStore.KeyStorePath).To(Equal("keystore3")) + }) + + It("correctly merges boolean fields", func() { + orderer, err := config.ReadOrdererFile("../../../../../testdata/init/orderer/orderer.yaml") + Expect(err).NotTo(HaveOccurred()) + + trueVal := true + orderer.General.Authentication.NoExpirationChecks = &trueVal + orderer.General.Profile.Enabled = &trueVal + Expect(*orderer.General.Authentication.NoExpirationChecks).To(Equal(true)) + Expect(*orderer.General.Profile.Enabled).To(Equal(true)) + + falseVal := false + newConfig := &config.Orderer{ + Orderer: v1.Orderer{ + General: v1.General{ + Authentication: v1.Authentication{ + NoExpirationChecks: &falseVal, + }, + }, + }, + } + + err = orderer.MergeWith(newConfig, false) + Expect(err).NotTo(HaveOccurred()) + + By("setting field from 'true' to 'false' if bool pointer set to 'false' in override config", func() { + Expect(*orderer.General.Authentication.NoExpirationChecks).To(Equal(false)) + }) + + By("persisting boolean fields set to 'true' when bool pointer not set to 'false' in override config", func() { + Expect(*orderer.General.Profile.Enabled).To(Equal(true)) + }) + + }) + }) + + It("reads in orderer.yaml and unmarshal it to peer config", func() { + orderer, err := config.ReadOrdererFile("../../../../../testdata/init/orderer/orderer.yaml") + Expect(err).NotTo(HaveOccurred()) + + // General + general := orderer.General + By("setting General.LedgerType", func() { + Expect(general.LedgerType).To(Equal("file")) + }) + + By("setting 
General.ListenAddress", func() { + Expect(general.ListenAddress).To(Equal("127.0.0.1")) + }) + + By("setting General.ListenPort", func() { + Expect(general.ListenPort).To(Equal(uint16(7050))) + }) + + By("setting General.TLS.Enabled", func() { + Expect(*general.TLS.Enabled).To(Equal(true)) + }) + + By("setting General.TLS.PrivateKey", func() { + Expect(general.TLS.PrivateKey).To(Equal("tls/server.key")) + }) + + By("setting General.TLS.Certificate", func() { + Expect(general.TLS.Certificate).To(Equal("tls/server.crt")) + }) + + By("setting General.TLS.RootCAs", func() { + Expect(general.TLS.RootCAs).To(Equal([]string{"tls/ca.crt"})) + }) + + By("setting General.TLS.ClientAuthRequired", func() { + Expect(*general.TLS.ClientAuthRequired).To(Equal(true)) + }) + + By("setting General.TLS.ClientRootCAs", func() { + Expect(general.TLS.ClientRootCAs).To(Equal([]string{"tls/client.crt"})) + }) + + By("setting General.BCCSP.ProviderName", func() { + Expect(general.BCCSP.ProviderName).To(Equal("SW")) + }) + + By("setting General.BCCSP.SW.HashFamily", func() { + Expect(general.BCCSP.SW.HashFamily).To(Equal("SHA2")) + }) + + By("setting General.BCCSP.SW.SecLevel", func() { + Expect(general.BCCSP.SW.SecLevel).To(Equal(256)) + }) + + By("setting General.BCCSP.SW.FileKeyStore.KeyStore", func() { + Expect(general.BCCSP.SW.FileKeyStore.KeyStorePath).To(Equal("msp/keystore")) + }) + + By("setting BCCSP.PKCS11.Library", func() { + Expect(general.BCCSP.PKCS11.Library).To(Equal("library1")) + }) + + By("setting BCCSP.PKCS11.Label", func() { + Expect(general.BCCSP.PKCS11.Label).To(Equal("label1")) + }) + + By("setting BCCSP.PKCS11.Pin", func() { + Expect(general.BCCSP.PKCS11.Pin).To(Equal("1234")) + }) + + By("setting BCCSP.PKCS11.HashFamily", func() { + Expect(general.BCCSP.PKCS11.HashFamily).To(Equal("SHA2")) + }) + + By("setting BCCSP.PKCS11.Security", func() { + Expect(general.BCCSP.PKCS11.SecLevel).To(Equal(256)) + }) + + By("setting BCCSP.PKCS11.FileKeystore.KeystorePath", func() { + Expect(general.BCCSP.PKCS11.FileKeyStore.KeyStorePath).To(Equal("keystore2")) + }) + }) +}) diff --git a/pkg/initializer/orderer/config/v1/io.go b/pkg/initializer/orderer/config/v1/io.go new file mode 100644 index 00000000..ad1f7baf --- /dev/null +++ b/pkg/initializer/orderer/config/v1/io.go @@ -0,0 +1,61 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package v1 + +import ( + "io/ioutil" + "path/filepath" + + "sigs.k8s.io/yaml" +) + +func ReadOrdererFile(path string) (*Orderer, error) { + config, err := ioutil.ReadFile(filepath.Clean(path)) + if err != nil { + return nil, err + } + + orderer := &Orderer{} + err = yaml.Unmarshal(config, orderer) + if err != nil { + return nil, err + } + + return orderer, nil +} + +func ReadOrdererFromBytes(config []byte) (*Orderer, error) { + orderer := &Orderer{} + err := yaml.Unmarshal(config, orderer) + if err != nil { + return nil, err + } + + return orderer, nil +} + +func ReadFrom(from *[]byte) (*Orderer, error) { + ordererConfig := &Orderer{} + err := yaml.Unmarshal(*from, ordererConfig) + if err != nil { + return nil, err + } + + return ordererConfig, nil +} diff --git a/pkg/initializer/orderer/config/v1/orderer.go b/pkg/initializer/orderer/config/v1/orderer.go new file mode 100644 index 00000000..caefeb45 --- /dev/null +++ b/pkg/initializer/orderer/config/v1/orderer.go @@ -0,0 +1,174 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1 + +import ( + "encoding/json" + "io/ioutil" + "strings" + + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + commonapi "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/orderer/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/util/merge" + "github.com/pkg/errors" + "sigs.k8s.io/yaml" +) + +type Orderer struct { + v1.Orderer `json:",inline"` +} + +func (o *Orderer) ToBytes() ([]byte, error) { + bytes, err := yaml.Marshal(o) + if err != nil { + return nil, err + } + + return bytes, nil +} + +func (o *Orderer) WriteToFile(path string) error { + bytes, err := yaml.Marshal(o) + if err != nil { + return err + } + + err = ioutil.WriteFile(path, bytes, 0600) + if err != nil { + return err + } + + return nil +} + +func (o *Orderer) MergeWith(newConfig interface{}, usingHSMProxy bool) error { + newOrderer := newConfig.(*Orderer) + + if newOrderer != nil { + err := merge.WithOverwrite(o, newConfig) + if err != nil { + return errors.Wrapf(err, "failed to merge orderer configuration overrides") + } + } + + if o.UsingPKCS11() { + o.SetPKCS11Defaults(usingHSMProxy) + } + + return nil +} + +func (o *Orderer) DeepCopyInto(into *Orderer) { + b, err := json.Marshal(o) + if err != nil { + return + } + + err = json.Unmarshal(b, into) + if err != nil { + return + } +} + +func (o *Orderer) DeepCopy() *Orderer { + if o == nil { + return nil + } + out := new(Orderer) + o.DeepCopyInto(out) + return out +} + +func (o *Orderer) UsingPKCS11() bool { + if o.General.BCCSP != nil { + if strings.ToLower(o.General.BCCSP.ProviderName) == "pkcs11" { + return true + } + } + return false +} + +func (o *Orderer) SetPKCS11Defaults(usingHSMProxy bool) { + if o.General.BCCSP.PKCS11 == nil { + o.General.BCCSP.PKCS11 = &commonapi.PKCS11Opts{} + } + + if usingHSMProxy { + 
o.General.BCCSP.PKCS11.Library = "/usr/local/lib/libpkcs11-proxy.so" + } + + if o.General.BCCSP.PKCS11.HashFamily == "" { + o.General.BCCSP.PKCS11.HashFamily = "SHA2" + } + + if o.General.BCCSP.PKCS11.SecLevel == 0 { + o.General.BCCSP.PKCS11.SecLevel = 256 + } +} + +func (o *Orderer) SetDefaultKeyStore() { + if o.General.BCCSP.PKCS11 != nil { + o.General.BCCSP.PKCS11.FileKeyStore = &commonapi.FileKeyStoreOpts{ + KeyStorePath: "msp/keystore", + } + } +} + +func (o *Orderer) SetBCCSPLibrary(library string) { + if o.General.BCCSP.PKCS11 == nil { + o.General.BCCSP.PKCS11 = &common.PKCS11Opts{} + } + + o.General.BCCSP.PKCS11.Library = library +} + +func (o *Orderer) GetBCCSPSection() *commonapi.BCCSP { + return o.General.BCCSP +} + +type OrdererOverrides struct { + // Not Fabric - this is for deployment + MaxNameLength *int `json:"maxnamelength,omitempty"` +} + +func (o *OrdererOverrides) GetMaxNameLength() *int { + return o.MaxNameLength +} + +func (o *OrdererOverrides) DeepCopyInto(into *OrdererOverrides) { + b, err := json.Marshal(o) + if err != nil { + return + } + + err = json.Unmarshal(b, into) + if err != nil { + return + } +} + +func (o *OrdererOverrides) DeepCopy() *OrdererOverrides { + if o == nil { + return nil + } + out := new(OrdererOverrides) + o.DeepCopyInto(out) + return out +} diff --git a/pkg/initializer/orderer/config/v2/config_suite_test.go b/pkg/initializer/orderer/config/v2/config_suite_test.go new file mode 100644 index 00000000..bbd7a82d --- /dev/null +++ b/pkg/initializer/orderer/config/v2/config_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v2_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestV2(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "V2 Suite") +} diff --git a/pkg/initializer/orderer/config/v2/config_test.go b/pkg/initializer/orderer/config/v2/config_test.go new file mode 100644 index 00000000..b3b8990e --- /dev/null +++ b/pkg/initializer/orderer/config/v2/config_test.go @@ -0,0 +1,198 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v2_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + commonapi "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/orderer/v1" + v2 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/orderer/v2" + config "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer/config/v2" +) + +var _ = Describe("V24 Orderer Configuration", func() { + Context("reading and writing orderer configuration file", func() { + BeforeEach(func() { + config := &config.Orderer{} + + err := config.WriteToFile("/tmp/orderer.yaml") + Expect(err).NotTo(HaveOccurred()) + }) + + It("creates orderer.yaml", func() { + Expect("/tmp/orderer.yaml").Should(BeAnExistingFile()) + }) + + It("read orderer.yaml", func() { + _, err := config.ReadOrdererFile("/tmp/orderer.yaml") + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("merges current configuration with overrides values", func() { + It("merges with defaults based on HSM proxy", func() { + orderer, err := config.ReadOrdererFile("../../../../../testdata/init/orderer/orderer.yaml") + Expect(err).NotTo(HaveOccurred()) + + newConfig := &config.Orderer{ + Orderer: v2.Orderer{ + General: v2.General{ + BCCSP: &commonapi.BCCSP{ + ProviderName: "PKCS11", + PKCS11: &commonapi.PKCS11Opts{ + Library: "library2", + Label: "label2", + Pin: "2222", + HashFamily: "SHA3", + SecLevel: 512, + FileKeyStore: &commonapi.FileKeyStoreOpts{ + KeyStorePath: "keystore3", + }, + }, + }, + }, + }, + } + + err = orderer.MergeWith(newConfig, true) + Expect(err).NotTo(HaveOccurred()) + Expect(orderer.General.BCCSP.PKCS11.Library).To(Equal("/usr/local/lib/libpkcs11-proxy.so")) + Expect(orderer.General.BCCSP.PKCS11.Label).To(Equal("label2")) + Expect(orderer.General.BCCSP.PKCS11.Pin).To(Equal("2222")) + Expect(orderer.General.BCCSP.PKCS11.HashFamily).To(Equal("SHA3")) + Expect(orderer.General.BCCSP.PKCS11.SecLevel).To(Equal(512)) + Expect(orderer.General.BCCSP.PKCS11.FileKeyStore.KeyStorePath).To(Equal("keystore3")) + }) + + It("correctly merges boolean fields", func() { + orderer, err := config.ReadOrdererFile("../../../../../testdata/init/orderer/orderer.yaml") + Expect(err).NotTo(HaveOccurred()) + + trueVal := true + orderer.General.Authentication.NoExpirationChecks = &trueVal + orderer.General.Profile.Enabled = &trueVal + Expect(*orderer.General.Authentication.NoExpirationChecks).To(Equal(true)) + Expect(*orderer.General.Profile.Enabled).To(Equal(true)) + + falseVal := false + newConfig := &config.Orderer{ + Orderer: v2.Orderer{ + General: v2.General{ + Authentication: v1.Authentication{ + NoExpirationChecks: &falseVal, + }, + }, + }, + } + + err = orderer.MergeWith(newConfig, false) + Expect(err).NotTo(HaveOccurred()) + + By("setting field from 'true' to 'false' if bool pointer set to 'false' in override config", func() { + Expect(*orderer.General.Authentication.NoExpirationChecks).To(Equal(false)) + }) + + By("persisting boolean fields set to 'true' when bool pointer not set to 'false' in override config", func() { + Expect(*orderer.General.Profile.Enabled).To(Equal(true)) + }) + + }) + }) + + It("reads in orderer.yaml and unmarshal it to peer config", func() { + orderer, err := config.ReadOrdererFile("../../../../../testdata/init/orderer/orderer.yaml") + Expect(err).NotTo(HaveOccurred()) + + // General + general := orderer.General + By("setting General.ListenAddress", func() { + Expect(general.ListenAddress).To(Equal("127.0.0.1")) + }) + + By("setting General.ListenPort", func() { + Expect(general.ListenPort).To(Equal(uint16(7050))) + }) + + 
By("setting General.TLS.Enabled", func() { + Expect(*general.TLS.Enabled).To(Equal(true)) + }) + + By("setting General.TLS.PrivateKey", func() { + Expect(general.TLS.PrivateKey).To(Equal("tls/server.key")) + }) + + By("setting General.TLS.Certificate", func() { + Expect(general.TLS.Certificate).To(Equal("tls/server.crt")) + }) + + By("setting General.TLS.RootCAs", func() { + Expect(general.TLS.RootCAs).To(Equal([]string{"tls/ca.crt"})) + }) + + By("setting General.TLS.ClientAuthRequired", func() { + Expect(*general.TLS.ClientAuthRequired).To(Equal(true)) + }) + + By("setting General.TLS.ClientRootCAs", func() { + Expect(general.TLS.ClientRootCAs).To(Equal([]string{"tls/client.crt"})) + }) + + By("setting General.BCCSP.ProviderName", func() { + Expect(general.BCCSP.ProviderName).To(Equal("SW")) + }) + + By("setting General.BCCSP.SW.HashFamily", func() { + Expect(general.BCCSP.SW.HashFamily).To(Equal("SHA2")) + }) + + By("setting General.BCCSP.SW.SecLevel", func() { + Expect(general.BCCSP.SW.SecLevel).To(Equal(256)) + }) + + By("setting General.BCCSP.SW.FileKeyStore.KeyStore", func() { + Expect(general.BCCSP.SW.FileKeyStore.KeyStorePath).To(Equal("msp/keystore")) + }) + + By("setting BCCSP.PKCS11.Library", func() { + Expect(general.BCCSP.PKCS11.Library).To(Equal("library1")) + }) + + By("setting BCCSP.PKCS11.Label", func() { + Expect(general.BCCSP.PKCS11.Label).To(Equal("label1")) + }) + + By("setting BCCSP.PKCS11.Pin", func() { + Expect(general.BCCSP.PKCS11.Pin).To(Equal("1234")) + }) + + By("setting BCCSP.PKCS11.HashFamily", func() { + Expect(general.BCCSP.PKCS11.HashFamily).To(Equal("SHA2")) + }) + + By("setting BCCSP.PKCS11.Security", func() { + Expect(general.BCCSP.PKCS11.SecLevel).To(Equal(256)) + }) + + By("setting BCCSP.PKCS11.FileKeystore.KeystorePath", func() { + Expect(general.BCCSP.PKCS11.FileKeyStore.KeyStorePath).To(Equal("keystore2")) + }) + }) +}) diff --git a/pkg/initializer/orderer/config/v2/io.go b/pkg/initializer/orderer/config/v2/io.go new file mode 100644 index 00000000..02be0b10 --- /dev/null +++ b/pkg/initializer/orderer/config/v2/io.go @@ -0,0 +1,61 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package v2 + +import ( + "io/ioutil" + "path/filepath" + + "sigs.k8s.io/yaml" +) + +func ReadOrdererFile(path string) (*Orderer, error) { + config, err := ioutil.ReadFile(filepath.Clean(path)) + if err != nil { + return nil, err + } + + orderer := &Orderer{} + err = yaml.Unmarshal(config, orderer) + if err != nil { + return nil, err + } + + return orderer, nil +} + +func ReadOrdererFromBytes(config []byte) (*Orderer, error) { + orderer := &Orderer{} + err := yaml.Unmarshal(config, orderer) + if err != nil { + return nil, err + } + + return orderer, nil +} + +func ReadFrom(from *[]byte) (*Orderer, error) { + ordererConfig := &Orderer{} + err := yaml.Unmarshal(*from, ordererConfig) + if err != nil { + return nil, err + } + + return ordererConfig, nil +} diff --git a/pkg/initializer/orderer/config/v2/orderer.go b/pkg/initializer/orderer/config/v2/orderer.go new file mode 100644 index 00000000..c532446b --- /dev/null +++ b/pkg/initializer/orderer/config/v2/orderer.go @@ -0,0 +1,141 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v2 + +import ( + "encoding/json" + "io/ioutil" + "strings" + + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + commonapi "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + v2 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/orderer/v2" + "github.com/IBM-Blockchain/fabric-operator/pkg/util/merge" + "github.com/pkg/errors" + "sigs.k8s.io/yaml" +) + +type Orderer struct { + v2.Orderer `json:",inline"` +} + +func (o *Orderer) ToBytes() ([]byte, error) { + bytes, err := yaml.Marshal(o) + if err != nil { + return nil, err + } + + return bytes, nil +} + +func (o *Orderer) WriteToFile(path string) error { + bytes, err := yaml.Marshal(o) + if err != nil { + return err + } + + err = ioutil.WriteFile(path, bytes, 0600) + if err != nil { + return err + } + + return nil +} + +func (o *Orderer) MergeWith(newConfig interface{}, usingHSMProxy bool) error { + newOrderer := newConfig.(*Orderer) + + if newOrderer != nil { + err := merge.WithOverwrite(o, newConfig) + if err != nil { + return errors.Wrapf(err, "failed to merge orderer configuration overrides") + } + } + + if o.UsingPKCS11() { + o.SetPKCS11Defaults(usingHSMProxy) + } + + return nil +} + +func (o *Orderer) DeepCopyInto(into *Orderer) { + b, err := json.Marshal(o) + if err != nil { + return + } + + err = json.Unmarshal(b, into) + if err != nil { + return + } +} + +func (o *Orderer) DeepCopy() *Orderer { + if o == nil { + return nil + } + out := new(Orderer) + o.DeepCopyInto(out) + return out +} + +func (o *Orderer) UsingPKCS11() bool { + if o.General.BCCSP != nil { + if strings.ToLower(o.General.BCCSP.ProviderName) == "pkcs11" { + return true + } + } + return false +} + +func (o *Orderer) SetPKCS11Defaults(usingHSMProxy bool) { + if o.General.BCCSP.PKCS11 == nil { + o.General.BCCSP.PKCS11 = &commonapi.PKCS11Opts{} + } + + if usingHSMProxy { + 
o.General.BCCSP.PKCS11.Library = "/usr/local/lib/libpkcs11-proxy.so" + } + + if o.General.BCCSP.PKCS11.HashFamily == "" { + o.General.BCCSP.PKCS11.HashFamily = "SHA2" + } + + if o.General.BCCSP.PKCS11.SecLevel == 0 { + o.General.BCCSP.PKCS11.SecLevel = 256 + } +} + +func (o *Orderer) SetBCCSPLibrary(library string) { + if o.General.BCCSP.PKCS11 == nil { + o.General.BCCSP.PKCS11 = &common.PKCS11Opts{} + } + + o.General.BCCSP.PKCS11.Library = library +} + +func (o *Orderer) SetDefaultKeyStore() { + // No-op + return +} + +func (o *Orderer) GetBCCSPSection() *commonapi.BCCSP { + return o.General.BCCSP +} diff --git a/pkg/initializer/orderer/config/v24/config_suite_test.go b/pkg/initializer/orderer/config/v24/config_suite_test.go new file mode 100644 index 00000000..522327f9 --- /dev/null +++ b/pkg/initializer/orderer/config/v24/config_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v24_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestV24(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "V2 Suite") +} diff --git a/pkg/initializer/orderer/config/v24/config_test.go b/pkg/initializer/orderer/config/v24/config_test.go new file mode 100644 index 00000000..7e126311 --- /dev/null +++ b/pkg/initializer/orderer/config/v24/config_test.go @@ -0,0 +1,198 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v24_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + commonapi "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/orderer/v1" + v24 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/orderer/v24" + config "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer/config/v24" +) + +var _ = Describe("V2 Orderer Configuration", func() { + Context("reading and writing orderer configuration file", func() { + BeforeEach(func() { + config := &config.Orderer{} + + err := config.WriteToFile("/tmp/orderer.yaml") + Expect(err).NotTo(HaveOccurred()) + }) + + It("creates orderer.yaml", func() { + Expect("/tmp/orderer.yaml").Should(BeAnExistingFile()) + }) + + It("read orderer.yaml", func() { + _, err := config.ReadOrdererFile("/tmp/orderer.yaml") + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("merges current configuration with overrides values", func() { + It("merges with defaults based on HSM proxy", func() { + orderer, err := config.ReadOrdererFile("../../../../../testdata/init/orderer/orderer.yaml") + Expect(err).NotTo(HaveOccurred()) + + newConfig := &config.Orderer{ + Orderer: v24.Orderer{ + General: v24.General{ + BCCSP: &commonapi.BCCSP{ + ProviderName: "PKCS11", + PKCS11: &commonapi.PKCS11Opts{ + Library: "library2", + Label: "label2", + Pin: "2222", + HashFamily: "SHA3", + SecLevel: 512, + FileKeyStore: &commonapi.FileKeyStoreOpts{ + KeyStorePath: "keystore3", + }, + }, + }, + }, + }, + } + + err = orderer.MergeWith(newConfig, true) + Expect(err).NotTo(HaveOccurred()) + Expect(orderer.General.BCCSP.PKCS11.Library).To(Equal("/usr/local/lib/libpkcs11-proxy.so")) + Expect(orderer.General.BCCSP.PKCS11.Label).To(Equal("label2")) + Expect(orderer.General.BCCSP.PKCS11.Pin).To(Equal("2222")) + Expect(orderer.General.BCCSP.PKCS11.HashFamily).To(Equal("SHA3")) + Expect(orderer.General.BCCSP.PKCS11.SecLevel).To(Equal(512)) + Expect(orderer.General.BCCSP.PKCS11.FileKeyStore.KeyStorePath).To(Equal("keystore3")) + }) + + It("correctly merges boolean fields", func() { + orderer, err := config.ReadOrdererFile("../../../../../testdata/init/orderer/orderer.yaml") + Expect(err).NotTo(HaveOccurred()) + + trueVal := true + orderer.General.Authentication.NoExpirationChecks = &trueVal + orderer.General.Profile.Enabled = &trueVal + Expect(*orderer.General.Authentication.NoExpirationChecks).To(Equal(true)) + Expect(*orderer.General.Profile.Enabled).To(Equal(true)) + + falseVal := false + newConfig := &config.Orderer{ + Orderer: v24.Orderer{ + General: v24.General{ + Authentication: v1.Authentication{ + NoExpirationChecks: &falseVal, + }, + }, + }, + } + + err = orderer.MergeWith(newConfig, false) + Expect(err).NotTo(HaveOccurred()) + + By("setting field from 'true' to 'false' if bool pointer set to 'false' in override config", func() { + Expect(*orderer.General.Authentication.NoExpirationChecks).To(Equal(false)) + }) + + By("persisting boolean fields set to 'true' when bool pointer not set to 'false' in override config", func() { + Expect(*orderer.General.Profile.Enabled).To(Equal(true)) + }) + + }) + }) + + It("reads in orderer.yaml and unmarshal it to peer config", func() { + orderer, err := config.ReadOrdererFile("../../../../../testdata/init/orderer/orderer.yaml") + Expect(err).NotTo(HaveOccurred()) + + // General + general := orderer.General + By("setting General.ListenAddress", func() { + Expect(general.ListenAddress).To(Equal("127.0.0.1")) + }) + + By("setting General.ListenPort", func() { + Expect(general.ListenPort).To(Equal(uint16(7050))) + }) + + 
By("setting General.TLS.Enabled", func() { + Expect(*general.TLS.Enabled).To(Equal(true)) + }) + + By("setting General.TLS.PrivateKey", func() { + Expect(general.TLS.PrivateKey).To(Equal("tls/server.key")) + }) + + By("setting General.TLS.Certificate", func() { + Expect(general.TLS.Certificate).To(Equal("tls/server.crt")) + }) + + By("setting General.TLS.RootCAs", func() { + Expect(general.TLS.RootCAs).To(Equal([]string{"tls/ca.crt"})) + }) + + By("setting General.TLS.ClientAuthRequired", func() { + Expect(*general.TLS.ClientAuthRequired).To(Equal(true)) + }) + + By("setting General.TLS.ClientRootCAs", func() { + Expect(general.TLS.ClientRootCAs).To(Equal([]string{"tls/client.crt"})) + }) + + By("setting General.BCCSP.ProviderName", func() { + Expect(general.BCCSP.ProviderName).To(Equal("SW")) + }) + + By("setting General.BCCSP.SW.HashFamily", func() { + Expect(general.BCCSP.SW.HashFamily).To(Equal("SHA2")) + }) + + By("setting General.BCCSP.SW.SecLevel", func() { + Expect(general.BCCSP.SW.SecLevel).To(Equal(256)) + }) + + By("setting General.BCCSP.SW.FileKeyStore.KeyStore", func() { + Expect(general.BCCSP.SW.FileKeyStore.KeyStorePath).To(Equal("msp/keystore")) + }) + + By("setting BCCSP.PKCS11.Library", func() { + Expect(general.BCCSP.PKCS11.Library).To(Equal("library1")) + }) + + By("setting BCCSP.PKCS11.Label", func() { + Expect(general.BCCSP.PKCS11.Label).To(Equal("label1")) + }) + + By("setting BCCSP.PKCS11.Pin", func() { + Expect(general.BCCSP.PKCS11.Pin).To(Equal("1234")) + }) + + By("setting BCCSP.PKCS11.HashFamily", func() { + Expect(general.BCCSP.PKCS11.HashFamily).To(Equal("SHA2")) + }) + + By("setting BCCSP.PKCS11.Security", func() { + Expect(general.BCCSP.PKCS11.SecLevel).To(Equal(256)) + }) + + By("setting BCCSP.PKCS11.FileKeystore.KeystorePath", func() { + Expect(general.BCCSP.PKCS11.FileKeyStore.KeyStorePath).To(Equal("keystore2")) + }) + }) +}) diff --git a/pkg/initializer/orderer/config/v24/io.go b/pkg/initializer/orderer/config/v24/io.go new file mode 100644 index 00000000..3b3f9083 --- /dev/null +++ b/pkg/initializer/orderer/config/v24/io.go @@ -0,0 +1,61 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package v24 + +import ( + "io/ioutil" + "path/filepath" + + "sigs.k8s.io/yaml" +) + +func ReadOrdererFile(path string) (*Orderer, error) { + config, err := ioutil.ReadFile(filepath.Clean(path)) + if err != nil { + return nil, err + } + + orderer := &Orderer{} + err = yaml.Unmarshal(config, orderer) + if err != nil { + return nil, err + } + + return orderer, nil +} + +func ReadOrdererFromBytes(config []byte) (*Orderer, error) { + orderer := &Orderer{} + err := yaml.Unmarshal(config, orderer) + if err != nil { + return nil, err + } + + return orderer, nil +} + +func ReadFrom(from *[]byte) (*Orderer, error) { + ordererConfig := &Orderer{} + err := yaml.Unmarshal(*from, ordererConfig) + if err != nil { + return nil, err + } + + return ordererConfig, nil +} diff --git a/pkg/initializer/orderer/config/v24/orderer.go b/pkg/initializer/orderer/config/v24/orderer.go new file mode 100644 index 00000000..bcabd8e8 --- /dev/null +++ b/pkg/initializer/orderer/config/v24/orderer.go @@ -0,0 +1,140 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v24 + +import ( + "encoding/json" + "io/ioutil" + "strings" + + commonapi "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + v24 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/orderer/v24" + "github.com/IBM-Blockchain/fabric-operator/pkg/util/merge" + "github.com/pkg/errors" + "sigs.k8s.io/yaml" +) + +type Orderer struct { + v24.Orderer `json:",inline"` +} + +func (o *Orderer) ToBytes() ([]byte, error) { + bytes, err := yaml.Marshal(o) + if err != nil { + return nil, err + } + + return bytes, nil +} + +func (o *Orderer) WriteToFile(path string) error { + bytes, err := yaml.Marshal(o) + if err != nil { + return err + } + + err = ioutil.WriteFile(path, bytes, 0600) + if err != nil { + return err + } + + return nil +} + +func (o *Orderer) MergeWith(newConfig interface{}, usingHSMProxy bool) error { + newOrderer := newConfig.(*Orderer) + + if newOrderer != nil { + err := merge.WithOverwrite(o, newConfig) + if err != nil { + return errors.Wrapf(err, "failed to merge orderer configuration overrides") + } + } + + if o.UsingPKCS11() { + o.SetPKCS11Defaults(usingHSMProxy) + } + + return nil +} + +func (o *Orderer) DeepCopyInto(into *Orderer) { + b, err := json.Marshal(o) + if err != nil { + return + } + + err = json.Unmarshal(b, into) + if err != nil { + return + } +} + +func (o *Orderer) DeepCopy() *Orderer { + if o == nil { + return nil + } + out := new(Orderer) + o.DeepCopyInto(out) + return out +} + +func (o *Orderer) UsingPKCS11() bool { + if o.General.BCCSP != nil { + if strings.ToLower(o.General.BCCSP.ProviderName) == "pkcs11" { + return true + } + } + return false +} + +func (o *Orderer) SetPKCS11Defaults(usingHSMProxy bool) { + if o.General.BCCSP.PKCS11 == nil { + o.General.BCCSP.PKCS11 = &commonapi.PKCS11Opts{} + } + + if usingHSMProxy { + o.General.BCCSP.PKCS11.Library = 
"/usr/local/lib/libpkcs11-proxy.so" + } + + if o.General.BCCSP.PKCS11.HashFamily == "" { + o.General.BCCSP.PKCS11.HashFamily = "SHA2" + } + + if o.General.BCCSP.PKCS11.SecLevel == 0 { + o.General.BCCSP.PKCS11.SecLevel = 256 + } +} + +func (o *Orderer) SetBCCSPLibrary(library string) { + if o.General.BCCSP.PKCS11 == nil { + o.General.BCCSP.PKCS11 = &commonapi.PKCS11Opts{} + } + + o.General.BCCSP.PKCS11.Library = library +} + +func (o *Orderer) SetDefaultKeyStore() { + // No-op + return +} + +func (o *Orderer) GetBCCSPSection() *commonapi.BCCSP { + return o.General.BCCSP +} diff --git a/pkg/initializer/orderer/configtx/config.go b/pkg/initializer/orderer/configtx/config.go new file mode 100644 index 00000000..a0be5d1e --- /dev/null +++ b/pkg/initializer/orderer/configtx/config.go @@ -0,0 +1,157 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package configtx + +import ( + "time" + + "github.com/hyperledger/fabric-protos-go/orderer/etcdraft" +) + +const ( + // ConsensusTypeSolo identifies the solo consensus implementation. + ConsensusTypeSolo = "solo" + // ConsensusTypeKafka identifies the Kafka-based consensus implementation. + ConsensusTypeKafka = "kafka" + // ConsensusTypeKafka identifies the Kafka-based consensus implementation. + ConsensusTypeEtcdRaft = "etcdraft" + + // BlockValidationPolicyKey + BlockValidationPolicyKey = "BlockValidation" + + // OrdererAdminsPolicy is the absolute path to the orderer admins policy + OrdererAdminsPolicy = "/Channel/Orderer/Admins" + + // SignaturePolicyType is the 'Type' string for signature policies + SignaturePolicyType = "Signature" + + // ImplicitMetaPolicyType is the 'Type' string for implicit meta policies + ImplicitMetaPolicyType = "ImplicitMeta" + + // AdminRoleAdminPrincipal is set as AdminRole to cause the MSP role of + // type Admin to be used as the admin principal default + AdminRoleAdminPrincipal = "Role.ADMIN" +) + +// TopLevel consists of the structs used by the configtxgen tool. +type TopLevel struct { + Profiles map[string]*Profile `yaml:"Profiles"` + Organizations []*Organization `yaml:"Organizations"` + Channel *Profile `yaml:"Channel"` + Application *Application `yaml:"Application"` + Orderer *Orderer `yaml:"Orderer"` + Capabilities map[string]map[string]bool `yaml:"Capabilities"` + Resources *Resources `yaml:"Resources"` +} + +// Profile encodes orderer/application configuration combinations for the +// configtxgen tool. 
+type Profile struct { + Consortium string `yaml:"Consortium"` + Application *Application `yaml:"Application"` + Orderer *Orderer `yaml:"Orderer"` + Consortiums map[string]*Consortium `yaml:"Consortiums"` + Capabilities map[string]bool `yaml:"Capabilities"` + Policies map[string]*Policy `yaml:"Policies"` +} + +// Policy encodes a channel config policy +type Policy struct { + Type string `yaml:"Type"` + Rule string `yaml:"Rule"` +} + +// Consortium represents a group of organizations which may create channels +// with each other +type Consortium struct { + Organizations []*Organization `yaml:"Organizations"` +} + +// Application encodes the application-level configuration needed in config +// transactions. +type Application struct { + Organizations []*Organization `yaml:"Organizations"` + Capabilities map[string]bool `yaml:"Capabilities"` + Resources *Resources `yaml:"Resources"` + Policies map[string]*Policy `yaml:"Policies"` + ACLs map[string]string `yaml:"ACLs"` +} + +// Resources encodes the application-level resources configuration needed to +// seed the resource tree +type Resources struct { + DefaultModPolicy string +} + +// Organization encodes the organization-level configuration needed in +// config transactions. +type Organization struct { + Name string `yaml:"Name"` + ID string `yaml:"ID"` + MSPDir string `yaml:"MSPDir"` + MSPType string `yaml:"MSPType"` + Policies map[string]*Policy `yaml:"Policies"` + + // Note: Viper deserialization does not seem to care for + // embedding of types, so we use one organization struct + // for both orderers and applications. + AnchorPeers []*AnchorPeer `yaml:"AnchorPeers"` + OrdererEndpoints []string `yaml:"OrdererEndpoints"` + + // AdminPrincipal is deprecated and may be removed in a future release + // it was used for modifying the default policy generation, but policies + // may now be specified explicitly so it is redundant and unnecessary + AdminPrincipal string `yaml:"AdminPrincipal"` + + // SkipAsForeign indicates that this org definition is actually unknown to this + // instance of the tool, so, parsing of this org's parameters should be ignored. + SkipAsForeign bool +} + +// AnchorPeer encodes the necessary fields to identify an anchor peer. +type AnchorPeer struct { + Host string `yaml:"Host"` + Port int `yaml:"Port"` +} + +// Orderer contains configuration associated to a channel. +type Orderer struct { + OrdererType string `yaml:"OrdererType"` + Addresses []string `yaml:"Addresses"` + BatchTimeout time.Duration `yaml:"BatchTimeout"` + BatchSize BatchSize `yaml:"BatchSize"` + Kafka Kafka `yaml:"Kafka"` + EtcdRaft *etcdraft.ConfigMetadata `yaml:"EtcdRaft"` + Organizations []*Organization `yaml:"Organizations"` + MaxChannels uint64 `yaml:"MaxChannels"` + Capabilities map[string]bool `yaml:"Capabilities"` + Policies map[string]*Policy `yaml:"Policies"` +} + +// BatchSize contains configuration affecting the size of batches. +type BatchSize struct { + MaxMessageCount uint32 `yaml:"MaxMessageCount"` + AbsoluteMaxBytes uint32 `yaml:"AbsoluteMaxBytes"` + PreferredMaxBytes uint32 `yaml:"PreferredMaxBytes"` +} + +// Kafka contains configuration for the Kafka-based orderer. 
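+//
+// Illustrative shape only (the broker address below is a placeholder host:port):
+//
+//    kafka := configtx.Kafka{Brokers: []string{"kafka-0.example.com:9092"}}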
+type Kafka struct { + Brokers []string `yaml:"Brokers"` +} diff --git a/pkg/initializer/orderer/configtx/configtx.go b/pkg/initializer/orderer/configtx/configtx.go new file mode 100644 index 00000000..94e90ee8 --- /dev/null +++ b/pkg/initializer/orderer/configtx/configtx.go @@ -0,0 +1,198 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package configtx + +import ( + "path/filepath" + "time" + + "github.com/hyperledger/fabric-protos-go/orderer/etcdraft" + "github.com/hyperledger/fabric/common/viperutil" + "github.com/pkg/errors" + "github.com/spf13/viper" +) + +// +k8s:openapi-gen=true +func GetGenesisDefaults() *TopLevel { + return &TopLevel{ + Profiles: map[string]*Profile{ + "Initial": &Profile{ + Orderer: &Orderer{ + Organizations: []*Organization{}, + OrdererType: "etcdraft", + Addresses: []string{}, + BatchTimeout: 2 * time.Second, + BatchSize: BatchSize{ + MaxMessageCount: 500, + AbsoluteMaxBytes: 10 * 1024 * 1024, + PreferredMaxBytes: 2 * 1024 * 1024, + }, + EtcdRaft: &etcdraft.ConfigMetadata{ + Consenters: []*etcdraft.Consenter{}, + Options: &etcdraft.Options{ + TickInterval: "500ms", + ElectionTick: 10, + HeartbeatTick: 1, + MaxInflightBlocks: 5, + SnapshotIntervalSize: 20 * 1024 * 1024, // 20 MB + }, + }, + Capabilities: map[string]bool{ + "V1_4_2": true, + }, + Policies: map[string]*Policy{ + "Readers": &Policy{ + Type: "ImplicitMeta", + Rule: "ANY Readers", + }, + "Writers": &Policy{ + Type: "ImplicitMeta", + Rule: "ANY Writers", + }, + "Admins": &Policy{ + Type: "ImplicitMeta", + Rule: "ANY Admins", + }, + "BlockValidation": &Policy{ + Type: "ImplicitMeta", + Rule: "ANY Writers", + }, + }, + }, + + Consortiums: map[string]*Consortium{ + "SampleConsortium": &Consortium{}, + }, + Capabilities: map[string]bool{ + "V1_4_3": true, + }, + Policies: map[string]*Policy{ + "Readers": &Policy{ + Type: "ImplicitMeta", + Rule: "ANY Readers", + }, + "Writers": &Policy{ + Type: "ImplicitMeta", + Rule: "ANY Writers", + }, + "Admins": &Policy{ + Type: "ImplicitMeta", + Rule: "MAJORITY Admins", + }, + }, + }, + }, + } +} + +func LoadTopLevelConfig(configFile string) (*TopLevel, error) { + config := viper.New() + configDir, err := filepath.Abs(filepath.Dir(configFile)) + if err != nil { + return nil, errors.Wrap(err, "error getting absolute path") + } + config.AddConfigPath(configDir) + config.SetConfigName("configtx") + + err = config.ReadInConfig() + if err != nil { + return nil, errors.Wrap(err, "error reading configuration") + } + + var uconf TopLevel + err = viperutil.EnhancedExactUnmarshal(config, &uconf) + if err != nil { + return nil, errors.Wrap(err, "error unmarshaling config into struct") + } + + return &uconf, nil +} + +type ConfigTx struct { + Config *TopLevel +} + +func New() *ConfigTx { + c := &ConfigTx{ + Config: GetGenesisDefaults(), + } + + return c +} + +func (c *ConfigTx) GetProfile(name string) (*Profile, error) { + p, found := 
c.Config.Profiles[name] + if !found { + return nil, errors.Errorf("profile '%s' does not exist", name) + } + + err := c.CompleteProfileInitialization(p) + if err != nil { + return nil, err + } + + return p, nil +} + +func (c *ConfigTx) CompleteProfileInitialization(p *Profile) error { + if p.Orderer != nil { + return c.CompleteOrdererInitialization(p.Orderer) + } + + return nil +} + +func (c *ConfigTx) CompleteOrdererInitialization(ord *Orderer) error { + // Additional, consensus type-dependent initialization goes here + // Also using this to panic on unknown orderer type. + switch ord.OrdererType { + case ConsensusTypeSolo: + // nothing to be done here + case ConsensusTypeKafka: + // nothing to be done here + case ConsensusTypeEtcdRaft: + if _, err := time.ParseDuration(ord.EtcdRaft.Options.TickInterval); err != nil { + return errors.Errorf("Etcdraft TickInterval (%s) must be in time duration format", ord.EtcdRaft.Options.TickInterval) + } + + // validate the specified members for Options + if ord.EtcdRaft.Options.ElectionTick <= ord.EtcdRaft.Options.HeartbeatTick { + return errors.Errorf("election tick must be greater than heartbeat tick") + } + + for _, c := range ord.EtcdRaft.GetConsenters() { + if c.Host == "" { + return errors.Errorf("consenter info in %s configuration did not specify host", ConsensusTypeEtcdRaft) + } + if c.Port == 0 { + return errors.Errorf("consenter info in %s configuration did not specify port", ConsensusTypeEtcdRaft) + } + if c.ClientTlsCert == nil { + return errors.Errorf("consenter info in %s configuration did not specify client TLS cert", ConsensusTypeEtcdRaft) + } + if c.ServerTlsCert == nil { + return errors.Errorf("consenter info in %s configuration did not specify server TLS cert", ConsensusTypeEtcdRaft) + } + } + default: + return errors.Errorf("unknown orderer type: %s", ord.OrdererType) + } + + return nil +} diff --git a/pkg/initializer/orderer/configtx/configtx_suite_test.go b/pkg/initializer/orderer/configtx/configtx_suite_test.go new file mode 100644 index 00000000..c6ef6fcf --- /dev/null +++ b/pkg/initializer/orderer/configtx/configtx_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package configtx_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestConfigtx(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Configtx Suite") +} diff --git a/pkg/initializer/orderer/configtx/configtx_test.go b/pkg/initializer/orderer/configtx/configtx_test.go new file mode 100644 index 00000000..2e5c5955 --- /dev/null +++ b/pkg/initializer/orderer/configtx/configtx_test.go @@ -0,0 +1,53 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package configtx_test + +import ( + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer/configtx" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +const ( + defaultConfigTxFile = "../../../../testdata/init/orderer/configtx.yaml" + defaultConfigTxDir = "../../../../testdata/init/orderer" +) + +var _ = Describe("configtx", func() { + var ( + err error + configTx *configtx.ConfigTx + ) + + BeforeEach(func() { + configTx = configtx.New() + }) + + It("returns an error if profile does not exist", func() { + _, err = configTx.GetProfile("badprofile") + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("profile 'badprofile' does not exist")) + }) + + It("loads top level config from file", func() { + config, err := configtx.LoadTopLevelConfig(defaultConfigTxFile) + Expect(err).NotTo(HaveOccurred()) + Expect(config).NotTo(BeNil()) + }) +}) diff --git a/pkg/initializer/orderer/configtx/encoder.go b/pkg/initializer/orderer/configtx/encoder.go new file mode 100644 index 00000000..e467ab3a --- /dev/null +++ b/pkg/initializer/orderer/configtx/encoder.go @@ -0,0 +1,205 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package configtx + +import ( + cb "github.com/hyperledger/fabric-protos-go/common" + pb "github.com/hyperledger/fabric-protos-go/peer" + "github.com/hyperledger/fabric/common/cauthdsl" + "github.com/hyperledger/fabric/common/channelconfig" + "github.com/hyperledger/fabric/common/policies" + "github.com/hyperledger/fabric/msp" + "github.com/hyperledger/fabric/protoutil" + "github.com/pkg/errors" +) + +// NewApplicationGroup returns the application component of the channel configuration. It defines the organizations which are involved +// in application logic like chaincodes, and how these members may interact with the orderer. It sets the mod_policy of all elements to "Admins". 
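+//
+// A minimal caller-side sketch (the policy rules mirror the defaults used by
+// GetGenesisDefaults; Admins, Readers and Writers must all be present, otherwise
+// AddPolicies rejects the configuration):
+//
+//    app := &configtx.Application{
+//        Policies: map[string]*configtx.Policy{
+//            "Admins":  {Type: configtx.ImplicitMetaPolicyType, Rule: "MAJORITY Admins"},
+//            "Readers": {Type: configtx.ImplicitMetaPolicyType, Rule: "ANY Readers"},
+//            "Writers": {Type: configtx.ImplicitMetaPolicyType, Rule: "ANY Writers"},
+//        },
+//    }
+//    group, err := configtx.NewApplicationGroup(app)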
+func NewApplicationGroup(conf *Application) (*cb.ConfigGroup, error) { + applicationGroup := protoutil.NewConfigGroup() + if err := AddPolicies(applicationGroup, conf.Policies, channelconfig.AdminsPolicyKey); err != nil { + return nil, errors.Wrapf(err, "error adding policies to application group") + } + + if len(conf.ACLs) > 0 { + addValue(applicationGroup, channelconfig.ACLValues(conf.ACLs), channelconfig.AdminsPolicyKey) + } + + if len(conf.Capabilities) > 0 { + addValue(applicationGroup, channelconfig.CapabilitiesValue(conf.Capabilities), channelconfig.AdminsPolicyKey) + } + + for _, org := range conf.Organizations { + var err error + applicationGroup.Groups[org.Name], err = NewApplicationOrgGroup(org) + if err != nil { + return nil, errors.Wrap(err, "failed to create application org") + } + } + + applicationGroup.ModPolicy = channelconfig.AdminsPolicyKey + return applicationGroup, nil +} + +// NewApplicationOrgGroup returns an application org component of the channel configuration. It defines the crypto material for the organization +// (its MSP) as well as its anchor peers for use by the gossip network. It sets the mod_policy of all elements to "Admins". +func NewApplicationOrgGroup(conf *Organization) (*cb.ConfigGroup, error) { + applicationOrgGroup := protoutil.NewConfigGroup() + applicationOrgGroup.ModPolicy = channelconfig.AdminsPolicyKey + + if conf.SkipAsForeign { + return applicationOrgGroup, nil + } + + mspConfig, err := msp.GetVerifyingMspConfig(conf.MSPDir, conf.ID, conf.MSPType) + if err != nil { + return nil, errors.Wrapf(err, "1 - Error loading MSP configuration for org %s", conf.Name) + } + + if err := AddPolicies(applicationOrgGroup, conf.Policies, channelconfig.AdminsPolicyKey); err != nil { + return nil, errors.Wrapf(err, "error adding policies to application org group %s", conf.Name) + } + addValue(applicationOrgGroup, channelconfig.MSPValue(mspConfig), channelconfig.AdminsPolicyKey) + + var anchorProtos []*pb.AnchorPeer + for _, anchorPeer := range conf.AnchorPeers { + anchorProtos = append(anchorProtos, &pb.AnchorPeer{ + Host: anchorPeer.Host, + Port: int32(anchorPeer.Port), + }) + } + + // Avoid adding an unnecessary anchor peers element when one is not required. This helps + // prevent a delta from the orderer system channel when computing more complex channel + // creation transactions + if len(anchorProtos) > 0 { + addValue(applicationOrgGroup, channelconfig.AnchorPeersValue(anchorProtos), channelconfig.AdminsPolicyKey) + } + + return applicationOrgGroup, nil +} + +// NewConsortiumsGroup returns the consortiums component of the channel configuration. This element is only defined for the ordering system channel. +// It sets the mod_policy for all elements to "/Channel/Orderer/Admins". 
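+//
+// A minimal caller-side sketch (the names are placeholders; SkipAsForeign avoids
+// loading MSP material for the example organization):
+//
+//    consortiums := map[string]*configtx.Consortium{
+//        "SampleConsortium": {
+//            Organizations: []*configtx.Organization{
+//                {Name: "org1", ID: "org1msp", SkipAsForeign: true},
+//            },
+//        },
+//    }
+//    group, err := configtx.NewConsortiumsGroup(consortiums)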
+func NewConsortiumsGroup(conf map[string]*Consortium) (*cb.ConfigGroup, error) { + consortiumsGroup := protoutil.NewConfigGroup() + // This policy is not referenced anywhere, it is only used as part of the implicit meta policy rule at the channel level, so this setting + // effectively degrades control of the ordering system channel to the ordering admins + addPolicy(consortiumsGroup, policies.SignaturePolicy(channelconfig.AdminsPolicyKey, cauthdsl.AcceptAllPolicy), ordererAdminsPolicyName) + + for consortiumName, consortium := range conf { + var err error + consortiumsGroup.Groups[consortiumName], err = NewConsortiumGroup(consortium) + if err != nil { + return nil, errors.Wrapf(err, "failed to create consortium %s", consortiumName) + } + } + + consortiumsGroup.ModPolicy = ordererAdminsPolicyName + return consortiumsGroup, nil +} + +// NewConsortiums returns a consortiums component of the channel configuration. Each consortium defines the organizations which may be involved in channel +// creation, as well as the channel creation policy the orderer checks at channel creation time to authorize the action. It sets the mod_policy of all +// elements to "/Channel/Orderer/Admins". +func NewConsortiumGroup(conf *Consortium) (*cb.ConfigGroup, error) { + consortiumGroup := protoutil.NewConfigGroup() + + for _, org := range conf.Organizations { + var err error + consortiumGroup.Groups[org.Name], err = NewConsortiumOrgGroup(org) + if err != nil { + return nil, errors.Wrap(err, "failed to create consortium org") + } + } + + addValue(consortiumGroup, channelconfig.ChannelCreationPolicyValue(policies.ImplicitMetaAnyPolicy(channelconfig.AdminsPolicyKey).Value()), ordererAdminsPolicyName) + + consortiumGroup.ModPolicy = ordererAdminsPolicyName + return consortiumGroup, nil +} + +// NewConsortiumsGroup returns an org component of the channel configuration. It defines the crypto material for the +// organization (its MSP). It sets the mod_policy of all elements to "Admins". 
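+//
+// A minimal caller-side sketch (the MSP directory and policy rules are placeholders;
+// when SkipAsForeign is false the MSPDir must contain verifiable MSP material and
+// Admins, Readers and Writers policies must all be supplied):
+//
+//    org := &configtx.Organization{
+//        Name:    "org1",
+//        ID:      "org1msp",
+//        MSPType: "bccsp",
+//        MSPDir:  "/path/to/msp",
+//        Policies: map[string]*configtx.Policy{
+//            "Admins":  {Type: configtx.SignaturePolicyType, Rule: "OR('org1msp.admin')"},
+//            "Readers": {Type: configtx.SignaturePolicyType, Rule: "OR('org1msp.member')"},
+//            "Writers": {Type: configtx.SignaturePolicyType, Rule: "OR('org1msp.member')"},
+//        },
+//    }
+//    group, err := configtx.NewConsortiumOrgGroup(org)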
+func NewConsortiumOrgGroup(conf *Organization) (*cb.ConfigGroup, error) { + consortiumsOrgGroup := protoutil.NewConfigGroup() + consortiumsOrgGroup.ModPolicy = channelconfig.AdminsPolicyKey + + if conf.SkipAsForeign { + return consortiumsOrgGroup, nil + } + + mspConfig, err := msp.GetVerifyingMspConfig(conf.MSPDir, conf.ID, conf.MSPType) + if err != nil { + return nil, errors.Wrapf(err, "error loading MSP configuration for org: %s", conf.Name) + } + + if err := AddPolicies(consortiumsOrgGroup, conf.Policies, channelconfig.AdminsPolicyKey); err != nil { + return nil, errors.Wrapf(err, "error adding policies to consortiums org group '%s'", conf.Name) + } + + addValue(consortiumsOrgGroup, channelconfig.MSPValue(mspConfig), channelconfig.AdminsPolicyKey) + + return consortiumsOrgGroup, nil +} + +func AddPolicies(cg *cb.ConfigGroup, policyMap map[string]*Policy, modPolicy string) error { + switch { + case policyMap == nil: + return errors.Errorf("no policies defined") + case policyMap[channelconfig.AdminsPolicyKey] == nil: + return errors.Errorf("no Admins policy defined") + case policyMap[channelconfig.ReadersPolicyKey] == nil: + return errors.Errorf("no Readers policy defined") + case policyMap[channelconfig.WritersPolicyKey] == nil: + return errors.Errorf("no Writers policy defined") + } + + for policyName, policy := range policyMap { + switch policy.Type { + case ImplicitMetaPolicyType: + imp, err := policies.ImplicitMetaFromString(policy.Rule) + if err != nil { + return errors.Wrapf(err, "invalid implicit meta policy rule '%s'", policy.Rule) + } + cg.Policies[policyName] = &cb.ConfigPolicy{ + ModPolicy: modPolicy, + Policy: &cb.Policy{ + Type: int32(cb.Policy_IMPLICIT_META), + Value: protoutil.MarshalOrPanic(imp), + }, + } + case SignaturePolicyType: + sp, err := cauthdsl.FromString(policy.Rule) + if err != nil { + return errors.Wrapf(err, "invalid signature policy rule '%s'", policy.Rule) + } + cg.Policies[policyName] = &cb.ConfigPolicy{ + ModPolicy: modPolicy, + Policy: &cb.Policy{ + Type: int32(cb.Policy_SIGNATURE), + Value: protoutil.MarshalOrPanic(sp), + }, + } + default: + return errors.Errorf("unknown policy type: %s", policy.Type) + } + } + return nil +} diff --git a/pkg/initializer/orderer/configtx/profile.go b/pkg/initializer/orderer/configtx/profile.go new file mode 100644 index 00000000..f8d56947 --- /dev/null +++ b/pkg/initializer/orderer/configtx/profile.go @@ -0,0 +1,340 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package configtx + +import ( + "strings" + + "github.com/gogo/protobuf/proto" + "github.com/hyperledger/fabric/common/cauthdsl" + "github.com/hyperledger/fabric/common/channelconfig" + "github.com/hyperledger/fabric/common/policies" + + cb "github.com/hyperledger/fabric-protos-go/common" + "github.com/hyperledger/fabric-protos-go/msp" + "github.com/hyperledger/fabric-protos-go/orderer/etcdraft" + utils "github.com/hyperledger/fabric/protoutil" + "github.com/pkg/errors" +) + +const ( + ordererAdminsPolicyName = "/Channel/Orderer/Admins" +) + +func (p *Profile) AddOrdererAddress(address string) { + p.Orderer.Addresses = append(p.Orderer.Addresses, address) +} + +func (p *Profile) SetOrdererType(ordererType string) { + p.Orderer.OrdererType = ordererType +} + +func (p *Profile) SetCapabilitiesForOrderer(capabilities map[string]bool) { + p.Orderer.Capabilities = capabilities +} + +func (p *Profile) AddRaftConsentingNode(consenter *etcdraft.Consenter) error { + if strings.ToLower(p.Orderer.OrdererType) != "etcdraft" { + return errors.New("can only add raft consenting node if orderer type is 'etcdraft'") + } + p.Orderer.EtcdRaft.Consenters = append(p.Orderer.EtcdRaft.Consenters, consenter) + return nil +} + +func (p *Profile) AddConsortium(name string, consortium *Consortium) error { + for _, org := range consortium.Organizations { + err := ValidateOrg(org) + if err != nil { + return err + } + } + p.Consortiums[name] = consortium + return nil +} + +func (p *Profile) AddOrgToConsortium(name string, org *Organization) error { + err := ValidateOrg(org) + if err != nil { + return err + } + p.Consortiums[name].Organizations = append(p.Consortiums[name].Organizations, org) + return nil +} + +func (p *Profile) AddOrgToOrderer(org *Organization) error { + err := ValidateOrg(org) + if err != nil { + return err + } + p.Orderer.Organizations = append(p.Orderer.Organizations, org) + return nil +} + +func (p *Profile) SetMaxChannel(max uint64) { + p.Orderer.MaxChannels = max +} + +func (p *Profile) SetChannelPolicy(policies map[string]*Policy) { + p.Policies = policies +} + +func (p *Profile) GenerateBlock(channelID string, mspConfigs map[string]*msp.MSPConfig) ([]byte, error) { + if p.Orderer == nil { + return nil, errors.Errorf("refusing to generate block which is missing orderer section") + } + + if p.Consortiums == nil { + return nil, errors.New("Genesis block does not contain a consortiums group definition. 
This block cannot be used for orderer bootstrap.") + } + + cg, err := p.NewChannelConfigGroup(mspConfigs) + if err != nil { + return nil, err + } + + genesisBlock := p.Block(channelID, cg) + gBlockBytes, err := utils.Marshal(genesisBlock) + if err != nil { + return nil, errors.Wrap(err, "error marshalling genesis block") + } + + return gBlockBytes, nil + +} + +func (p *Profile) Block(channelID string, channelGroup *cb.ConfigGroup) *cb.Block { + payloadChannelHeader := utils.MakeChannelHeader(cb.HeaderType_CONFIG, int32(1), channelID, 0) + payloadSignatureHeader := utils.MakeSignatureHeader(nil, utils.CreateNonceOrPanic()) + utils.SetTxID(payloadChannelHeader, payloadSignatureHeader) + payloadHeader := utils.MakePayloadHeader(payloadChannelHeader, payloadSignatureHeader) + payload := &cb.Payload{Header: payloadHeader, Data: utils.MarshalOrPanic(&cb.ConfigEnvelope{Config: &cb.Config{ChannelGroup: channelGroup}})} + envelope := &cb.Envelope{Payload: utils.MarshalOrPanic(payload), Signature: nil} + + block := utils.NewBlock(0, nil) + block.Data = &cb.BlockData{Data: [][]byte{utils.MarshalOrPanic(envelope)}} + block.Header.DataHash = utils.BlockDataHash(block.Data) + block.Metadata.Metadata[cb.BlockMetadataIndex_LAST_CONFIG] = utils.MarshalOrPanic(&cb.Metadata{ + Value: utils.MarshalOrPanic(&cb.LastConfig{Index: 0}), + }) + return block +} + +func (p *Profile) NewChannelConfigGroup(mspConfigs map[string]*msp.MSPConfig) (*cb.ConfigGroup, error) { + channelGroup := utils.NewConfigGroup() + if len(p.Policies) == 0 { + addImplicitMetaPolicyDefaults(channelGroup) + } + + err := addPolicies(channelGroup, p.Policies, channelconfig.AdminsPolicyKey) + if err != nil { + return nil, errors.Wrapf(err, "error adding policies to channel group") + } + + addValue(channelGroup, channelconfig.HashingAlgorithmValue(), channelconfig.AdminsPolicyKey) + addValue(channelGroup, channelconfig.BlockDataHashingStructureValue(), channelconfig.AdminsPolicyKey) + if p.Orderer != nil && len(p.Orderer.Addresses) > 0 { + addValue(channelGroup, channelconfig.OrdererAddressesValue(p.Orderer.Addresses), ordererAdminsPolicyName) + } + + if p.Consortium != "" { + addValue(channelGroup, channelconfig.ConsortiumValue(p.Consortium), channelconfig.AdminsPolicyKey) + } + + if len(p.Capabilities) > 0 { + addValue(channelGroup, channelconfig.CapabilitiesValue(p.Capabilities), channelconfig.AdminsPolicyKey) + } + + if p.Orderer != nil { + channelGroup.Groups[channelconfig.OrdererGroupKey], err = p.NewOrdererGroup(p.Orderer, mspConfigs) + if err != nil { + return nil, errors.Wrap(err, "could not create orderer group") + } + } + + if p.Application != nil { + channelGroup.Groups[channelconfig.ApplicationGroupKey], err = NewApplicationGroup(p.Application) + if err != nil { + return nil, errors.Wrap(err, "could not create application group") + } + } + + if p.Consortiums != nil { + channelGroup.Groups[channelconfig.ConsortiumsGroupKey], err = NewConsortiumsGroup(p.Consortiums) + if err != nil { + return nil, errors.Wrap(err, "could not create consortiums group") + } + } + + channelGroup.ModPolicy = channelconfig.AdminsPolicyKey + return channelGroup, nil +} + +func (p *Profile) NewOrdererGroup(conf *Orderer, mspConfigs map[string]*msp.MSPConfig) (*cb.ConfigGroup, error) { + ordererGroup := utils.NewConfigGroup() + if len(conf.Policies) == 0 { + addImplicitMetaPolicyDefaults(ordererGroup) + } else { + if err := addPolicies(ordererGroup, conf.Policies, channelconfig.AdminsPolicyKey); err != nil { + return nil, errors.Wrapf(err, "error adding 
policies to orderer group") + } + } + ordererGroup.Policies[BlockValidationPolicyKey] = &cb.ConfigPolicy{ + Policy: policies.ImplicitMetaAnyPolicy(channelconfig.WritersPolicyKey).Value(), + ModPolicy: channelconfig.AdminsPolicyKey, + } + addValue(ordererGroup, channelconfig.BatchSizeValue( + conf.BatchSize.MaxMessageCount, + conf.BatchSize.AbsoluteMaxBytes, + conf.BatchSize.PreferredMaxBytes, + ), channelconfig.AdminsPolicyKey) + addValue(ordererGroup, channelconfig.BatchTimeoutValue(conf.BatchTimeout.String()), channelconfig.AdminsPolicyKey) + addValue(ordererGroup, channelconfig.ChannelRestrictionsValue(conf.MaxChannels), channelconfig.AdminsPolicyKey) + + if len(conf.Capabilities) > 0 { + addValue(ordererGroup, channelconfig.CapabilitiesValue(conf.Capabilities), channelconfig.AdminsPolicyKey) + } + + var consensusMetadata []byte + switch conf.OrdererType { + case ConsensusTypeSolo: + // nothing to be done here + case ConsensusTypeKafka: + // nothing to be done here + case ConsensusTypeEtcdRaft: + cm, err := proto.Marshal(p.Orderer.EtcdRaft) + if err != nil { + return nil, err + } + consensusMetadata = cm + default: + return nil, errors.Errorf("unknown orderer type: %s", conf.OrdererType) + } + + addValue(ordererGroup, channelconfig.ConsensusTypeValue(conf.OrdererType, consensusMetadata), channelconfig.AdminsPolicyKey) + + for _, org := range conf.Organizations { + var err error + ordererGroup.Groups[org.Name], err = NewOrdererOrgGroup(org, mspConfigs[org.Name]) + if err != nil { + return nil, errors.Wrap(err, "failed to create orderer org") + } + } + + ordererGroup.ModPolicy = channelconfig.AdminsPolicyKey + return ordererGroup, nil +} + +func ValidateOrg(org *Organization) error { + if org.MSPType == "" { + return errors.Errorf("failed to provide msp type for org '%s'", org.Name) + } + + if org.AdminPrincipal == "" { + return errors.Errorf("failed to provide admin principal") + } + + return nil +} + +// NewOrdererOrgGroup returns an orderer org component of the channel configuration. It defines the crypto material for the +// organization (its MSP). It sets the mod_policy of all elements to "Admins". 
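+//
+// A minimal caller-side sketch (the endpoint is a placeholder; the MSPConfig would
+// normally be one of the entries passed to Profile.GenerateBlock, and an empty value
+// is used here only to show the call shape):
+//
+//    org := &configtx.Organization{
+//        Name:             "ordererorg",
+//        ID:               "orderermsp",
+//        AdminPrincipal:   "Role.MEMBER",
+//        OrdererEndpoints: []string{"orderer.example.com:7050"},
+//    }
+//    group, err := configtx.NewOrdererOrgGroup(org, &msp.MSPConfig{})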
+func NewOrdererOrgGroup(conf *Organization, mspConfig *msp.MSPConfig) (*cb.ConfigGroup, error) { + ordererOrgGroup := utils.NewConfigGroup() + if len(conf.Policies) == 0 { + addSignaturePolicyDefaults(ordererOrgGroup, conf.ID, conf.AdminPrincipal != AdminRoleAdminPrincipal) + } else { + if err := addPolicies(ordererOrgGroup, conf.Policies, channelconfig.AdminsPolicyKey); err != nil { + return nil, errors.Wrapf(err, "error adding policies to orderer org group '%s'", conf.Name) + } + } + + addValue(ordererOrgGroup, channelconfig.MSPValue(mspConfig), channelconfig.AdminsPolicyKey) + + ordererOrgGroup.ModPolicy = channelconfig.AdminsPolicyKey + + if len(conf.OrdererEndpoints) > 0 { + addValue(ordererOrgGroup, channelconfig.EndpointsValue(conf.OrdererEndpoints), channelconfig.AdminsPolicyKey) + } + + return ordererOrgGroup, nil +} + +func addValue(cg *cb.ConfigGroup, value channelconfig.ConfigValue, modPolicy string) { + cg.Values[value.Key()] = &cb.ConfigValue{ + Value: utils.MarshalOrPanic(value.Value()), + ModPolicy: modPolicy, + } +} + +func addPolicy(cg *cb.ConfigGroup, policy policies.ConfigPolicy, modPolicy string) { + cg.Policies[policy.Key()] = &cb.ConfigPolicy{ + Policy: policy.Value(), + ModPolicy: modPolicy, + } +} + +func addPolicies(cg *cb.ConfigGroup, policyMap map[string]*Policy, modPolicy string) error { + for policyName, policy := range policyMap { + switch policy.Type { + case ImplicitMetaPolicyType: + imp, err := policies.ImplicitMetaFromString(policy.Rule) + if err != nil { + return errors.Wrapf(err, "invalid implicit meta policy rule '%s'", policy.Rule) + } + cg.Policies[policyName] = &cb.ConfigPolicy{ + ModPolicy: modPolicy, + Policy: &cb.Policy{ + Type: int32(cb.Policy_IMPLICIT_META), + Value: utils.MarshalOrPanic(imp), + }, + } + case SignaturePolicyType: + sp, err := cauthdsl.FromString(policy.Rule) + if err != nil { + return errors.Wrapf(err, "invalid signature policy rule '%s'", policy.Rule) + } + cg.Policies[policyName] = &cb.ConfigPolicy{ + ModPolicy: modPolicy, + Policy: &cb.Policy{ + Type: int32(cb.Policy_SIGNATURE), + Value: utils.MarshalOrPanic(sp), + }, + } + default: + return errors.Errorf("unknown policy type: %s", policy.Type) + } + } + return nil +} + +func addImplicitMetaPolicyDefaults(cg *cb.ConfigGroup) { + addPolicy(cg, policies.ImplicitMetaMajorityPolicy(channelconfig.AdminsPolicyKey), channelconfig.AdminsPolicyKey) + addPolicy(cg, policies.ImplicitMetaAnyPolicy(channelconfig.ReadersPolicyKey), channelconfig.AdminsPolicyKey) + addPolicy(cg, policies.ImplicitMetaAnyPolicy(channelconfig.WritersPolicyKey), channelconfig.AdminsPolicyKey) +} + +func addSignaturePolicyDefaults(cg *cb.ConfigGroup, mspID string, devMode bool) { + if devMode { + addPolicy(cg, policies.SignaturePolicy(channelconfig.AdminsPolicyKey, cauthdsl.SignedByMspMember(mspID)), channelconfig.AdminsPolicyKey) + } else { + addPolicy(cg, policies.SignaturePolicy(channelconfig.AdminsPolicyKey, cauthdsl.SignedByMspAdmin(mspID)), channelconfig.AdminsPolicyKey) + } + addPolicy(cg, policies.SignaturePolicy(channelconfig.ReadersPolicyKey, cauthdsl.SignedByMspMember(mspID)), channelconfig.AdminsPolicyKey) + addPolicy(cg, policies.SignaturePolicy(channelconfig.WritersPolicyKey, cauthdsl.SignedByMspMember(mspID)), channelconfig.AdminsPolicyKey) +} diff --git a/pkg/initializer/orderer/configtx/profile_test.go b/pkg/initializer/orderer/configtx/profile_test.go new file mode 100644 index 00000000..a4c3d320 --- /dev/null +++ b/pkg/initializer/orderer/configtx/profile_test.go @@ -0,0 +1,188 @@ +/* + * 
Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package configtx_test + +import ( + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer/configtx" + "github.com/hyperledger/fabric-protos-go/msp" + "github.com/hyperledger/fabric-protos-go/orderer/etcdraft" + "github.com/hyperledger/fabric/common/channelconfig" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("profile", func() { + var ( + err error + profile *configtx.Profile + mspConfig map[string]*msp.MSPConfig + ) + + BeforeEach(func() { + configTx := configtx.New() + profile, err = configTx.GetProfile("Initial") + Expect(err).NotTo(HaveOccurred()) + + mspConfig = map[string]*msp.MSPConfig{ + "testorg3": &msp.MSPConfig{}, + } + }) + + Context("profile configuration updates", func() { + It("adds orderer address to profile", func() { + blockBytes, err := profile.GenerateBlock("channel1", mspConfig) + Expect(err).NotTo(HaveOccurred()) + Expect(string(blockBytes)).NotTo(ContainSubstring("127.0.0.1:7051")) + + profile.AddOrdererAddress("127.0.0.1:7051") + blockBytes, err = profile.GenerateBlock("channel1", mspConfig) + Expect(err).NotTo(HaveOccurred()) + + Expect(string(blockBytes)).To(ContainSubstring("127.0.0.1:7051")) + }) + + It("sets orderer type", func() { + profile.SetOrdererType("etcdraft") + blockBytes, err := profile.GenerateBlock("channel1", mspConfig) + Expect(err).NotTo(HaveOccurred()) + + Expect(string(blockBytes)).To(ContainSubstring("etcdraft")) + }) + + It("adds raft consenting node", func() { + consenter := &etcdraft.Consenter{ + Host: "testrafthost", + Port: 7050, + ClientTlsCert: []byte("../../../../testdata/tls/tls.crt"), + ServerTlsCert: []byte("../../../../testdata/tls/tls.crt"), + } + + profile.SetOrdererType("etcdraft") + err := profile.AddRaftConsentingNode(consenter) + Expect(err).NotTo(HaveOccurred()) + + blockBytes, err := profile.GenerateBlock("channel1", mspConfig) + Expect(err).NotTo(HaveOccurred()) + + Expect(string(blockBytes)).To(ContainSubstring("testrafthost")) + }) + + It("adds consortium", func() { + profile.Policies = map[string]*configtx.Policy{ + channelconfig.AdminsPolicyKey: &configtx.Policy{ + Type: configtx.ImplicitMetaPolicyType, + Rule: "ALL bar", + }, + channelconfig.ReadersPolicyKey: &configtx.Policy{ + Type: configtx.ImplicitMetaPolicyType, + Rule: "ALL bar", + }, + channelconfig.WritersPolicyKey: &configtx.Policy{ + Type: configtx.ImplicitMetaPolicyType, + Rule: "ALL bar", + }, + } + + org := &configtx.Organization{ + Name: "testorg", + ID: "testorg", + MSPType: "bccsp", + MSPDir: "../../../../testdata/init/orderer/msp", + AdminPrincipal: "Role.MEMBER", + Policies: profile.Policies, + } + + consortium := &configtx.Consortium{ + Organizations: []*configtx.Organization{org}, + } + + profile.SetOrdererType("etcdraft") + err := profile.AddConsortium("testconsortium", consortium) + Expect(err).NotTo(HaveOccurred()) + + 
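+			// Regenerating the block should now include the new consortium and its organization.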
blockBytes, err := profile.GenerateBlock("channel1", mspConfig) + Expect(err).NotTo(HaveOccurred()) + + Expect(string(blockBytes)).To(ContainSubstring("testconsortium")) + Expect(string(blockBytes)).To(ContainSubstring("testorg")) + }) + + It("adds org to consortium", func() { + profile.Policies = map[string]*configtx.Policy{ + channelconfig.AdminsPolicyKey: &configtx.Policy{ + Type: configtx.ImplicitMetaPolicyType, + Rule: "ALL bar", + }, + channelconfig.ReadersPolicyKey: &configtx.Policy{ + Type: configtx.ImplicitMetaPolicyType, + Rule: "ALL bar", + }, + channelconfig.WritersPolicyKey: &configtx.Policy{ + Type: configtx.ImplicitMetaPolicyType, + Rule: "ALL bar", + }, + } + + org := &configtx.Organization{ + Name: "testorg", + ID: "testorg", + MSPType: "bccsp", + MSPDir: "../../../../testdata/init/orderer/msp", + AdminPrincipal: "Role.MEMBER", + Policies: profile.Policies, + } + + consortium := &configtx.Consortium{ + Organizations: []*configtx.Organization{org}, + } + + profile.SetOrdererType("etcdraft") + profile.AddConsortium("testconsortium", consortium) + + org.Name = "testorg2" + err := profile.AddOrgToConsortium("testconsortium", org) + Expect(err).NotTo(HaveOccurred()) + + blockBytes, err := profile.GenerateBlock("channel1", mspConfig) + Expect(err).NotTo(HaveOccurred()) + + Expect(string(blockBytes)).To(ContainSubstring("testconsortium")) + Expect(string(blockBytes)).To(ContainSubstring("testorg2")) + }) + }) + + It("adds org to orderer", func() { + org := &configtx.Organization{ + Name: "testorg3", + ID: "testorg3", + MSPType: "bccsp", + MSPDir: "../../../../testdata/init/orderer/msp", + AdminPrincipal: "Role.MEMBER", + } + + profile.SetOrdererType("etcdraft") + err := profile.AddOrgToOrderer(org) + Expect(err).NotTo(HaveOccurred()) + + blockBytes, err := profile.GenerateBlock("channel1", mspConfig) + Expect(err).NotTo(HaveOccurred()) + + Expect(string(blockBytes)).To(ContainSubstring("testorg3")) + }) +}) diff --git a/pkg/initializer/orderer/initializer.go b/pkg/initializer/orderer/initializer.go new file mode 100644 index 00000000..eb8d4f47 --- /dev/null +++ b/pkg/initializer/orderer/initializer.go @@ -0,0 +1,496 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package initializer + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/enroller" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/secretmanager" + ordererconfig "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer/config/v1" + v2ordererconfig "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer/config/v2" + v24ordererconfig "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer/config/v24" + "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + "github.com/IBM-Blockchain/fabric-operator/version" + "github.com/pkg/errors" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + corev1 "k8s.io/api/core/v1" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var log = logf.Log.WithName("orderer_initializer") + +type Config struct { + ConfigTxFile string + OrdererFile string + OrdererV2File string + OrdererV24File string + OUFile string + InterOUFile string + DeploymentFile string + PVCFile string + ServiceFile string + CMFile string + RoleFile string + ServiceAccountFile string + RoleBindingFile string + IngressFile string + Ingressv1beta1File string + RouteFile string + StoragePath string +} + +type Response struct { + Config OrdererConfig + Crypto *config.CryptoResponse +} + +//go:generate counterfeiter -o mocks/ibporderer.go -fake-name IBPOrderer . 
IBPOrderer + +type IBPOrderer interface { + OverrideConfig(newConfig OrdererConfig) error + GenerateCrypto() (*config.CryptoResponse, error) + GetConfig() OrdererConfig +} + +type Initializer struct { + Config *Config + Scheme *runtime.Scheme + Client k8sclient.Client + Name string + Timeouts enroller.HSMEnrollJobTimeouts + + Validator common.CryptoValidator + SecretManager *secretmanager.SecretManager +} + +func New(client controllerclient.Client, scheme *runtime.Scheme, cfg *Config, name string, validator common.CryptoValidator) *Initializer { + initializer := &Initializer{ + Client: client, + Scheme: scheme, + Config: cfg, + Name: name, + Validator: validator, + } + + initializer.SecretManager = secretmanager.New(client, scheme, initializer.GetLabels) + + return initializer +} + +func (i *Initializer) Create(overrides OrdererConfig, orderer IBPOrderer, storagePath string) (*Response, error) { + var err error + + log.Info(fmt.Sprintf("Creating orderer %s's config and crypto...", i.Name)) + + err = os.RemoveAll(storagePath) + if err != nil { + return nil, err + } + + err = orderer.OverrideConfig(overrides) + if err != nil { + return nil, err + } + + cresp, err := orderer.GenerateCrypto() + if err != nil { + return nil, err + } + + err = os.RemoveAll(storagePath) + if err != nil { + return nil, err + } + + return &Response{ + Config: orderer.GetConfig(), + Crypto: cresp, + }, nil +} + +func (i *Initializer) Update(overrides OrdererConfig, orderer IBPOrderer) (*Response, error) { + var err error + + log.Info(fmt.Sprintf("Updating orderer %s's config...", i.Name)) + + err = orderer.OverrideConfig(overrides) + if err != nil { + return nil, err + } + + return &Response{ + Config: orderer.GetConfig(), + }, nil +} + +func (i *Initializer) GetEnrollers(cryptos *config.Cryptos, instance *current.IBPOrderer, storagePath string) error { + // If no enrollment information provided, don't need to proceed further + if instance.Spec.Secret == nil || instance.Spec.Secret.Enrollment == nil { + return nil + } + + enrollmentSpec := instance.Spec.Secret.Enrollment + if enrollmentSpec.Component != nil && cryptos.Enrollment == nil { + bytes, err := enrollmentSpec.Component.GetCATLSBytes() + if err != nil { + return err + } + + cryptos.Enrollment, err = enroller.Factory(enrollmentSpec.Component, i.Client, instance, + filepath.Join(storagePath, "ecert"), + i.Scheme, + bytes, + i.Timeouts, + ) + if err != nil { + return err + } + } + + // err := common.GetSWEnrollers(cryptos, enrollmentSpec, storagePath) + err := common.GetCommonEnrollers(cryptos, enrollmentSpec, storagePath) + if err != nil { + return err + } + + return nil +} + +func (i *Initializer) GetMSPCrypto(cryptos *config.Cryptos, instance *current.IBPOrderer) error { + mspSpec := instance.Spec.Secret.MSP + if mspSpec != nil { + err := common.GetMSPCrypto(cryptos, mspSpec) + if err != nil { + return err + } + } + + return nil +} + +func (i *Initializer) GetInitOrderer(instance *current.IBPOrderer, storagePath string) (*Orderer, error) { + cryptos := &config.Cryptos{} + + if instance.Spec.Secret != nil { + // Prioritize any crypto passed through MSP spec first + err := i.GetMSPCrypto(cryptos, instance) + if err != nil { + return nil, errors.Wrap(err, "failed to populate init orderer with MSP spec") + } + + err = i.GetEnrollers(cryptos, instance, storagePath) + if err != nil { + return nil, errors.Wrap(err, "failed to populate init orderer with Enrollment spec") + } + } + + return &Orderer{ + Cryptos: cryptos, + }, nil +} + +func (i *Initializer) 
GetUpdatedOrderer(instance *current.IBPOrderer) (*Orderer, error) { + cryptos := &config.Cryptos{} + + // Only check for any new certs passed through MSP spec + err := i.GetMSPCrypto(cryptos, instance) + if err != nil { + return nil, errors.Wrap(err, "failed to populate updated init orderer with MSP spec") + } + + return &Orderer{ + Cryptos: cryptos, + }, nil +} + +func (i *Initializer) GenerateSecrets(prefix common.SecretType, instance *current.IBPOrderer, crypto *config.Response) error { + if crypto == nil { + return nil + } + return i.SecretManager.GenerateSecrets(prefix, instance, crypto) +} + +func (i *Initializer) GenerateSecretsFromResponse(instance *current.IBPOrderer, cryptoResponse *config.CryptoResponse) error { + return i.SecretManager.GenerateSecretsFromResponse(instance, cryptoResponse) +} + +func (i *Initializer) UpdateSecrets(prefix common.SecretType, instance *current.IBPOrderer, crypto *config.Response) error { + if crypto == nil { + return nil + } + return i.SecretManager.UpdateSecrets(prefix, instance, crypto) +} + +func (i *Initializer) UpdateSecretsFromResponse(instance *current.IBPOrderer, cryptoResponse *config.CryptoResponse) error { + return i.SecretManager.UpdateSecretsFromResponse(instance, cryptoResponse) +} + +func (i *Initializer) GetCrypto(instance *current.IBPOrderer) (*config.CryptoResponse, error) { + return i.SecretManager.GetCryptoResponseFromSecrets(instance) +} + +func (i *Initializer) Delete(instance *current.IBPOrderer) error { + name := fmt.Sprintf("%s%s", instance.Name, i.Name) + prefix := "ecert" + err := i.SecretManager.DeleteSecrets(prefix, instance, name) + if err != nil { + return err + } + + prefix = "tls" + err = i.SecretManager.DeleteSecrets(prefix, instance, name) + if err != nil { + return err + } + + cm := &corev1.ConfigMap{} + cm.Name = instance.Name + "-" + i.Name + "-config" + cm.Namespace = instance.Namespace + + err = i.Client.Delete(context.TODO(), cm) + if err != nil { + if !k8serrors.IsNotFound(err) { + return errors.Wrapf(err, "failed to delete config map '%s'", cm.Name) + } + } + + return nil +} + +func (i *Initializer) MissingCrypto(instance *current.IBPOrderer) bool { + isHSMEnabled := instance.IsHSMEnabled() + if isHSMEnabled { + i.Validator.SetHSMEnabled(true) + } + + checkClientAuth := instance.ClientAuthCryptoSet() + err := common.CheckCrypto(i.Validator, instance, checkClientAuth) + if err != nil { + log.Info(err.Error()) + return true + } + + return false +} + +func (i *Initializer) CreateOrUpdateConfigMap(instance *current.IBPOrderer, orderer OrdererConfig) error { + name := fmt.Sprintf("%s-config", instance.GetName()) + log.Info(fmt.Sprintf("Creating/Updating config map '%s'...", name)) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: instance.GetNamespace(), + Labels: i.GetLabels(instance), + }, + BinaryData: map[string][]byte{}, + } + + existing, err := i.GetConfigFromConfigMap(instance) + if err != nil { + if !k8serrors.IsNotFound(err) { + return err + } + } + if existing != nil { + cm.BinaryData = existing.BinaryData + } + + if orderer != nil { + err := i.addOrdererConfigToCM(instance, cm, orderer) + if err != nil { + return err + } + } + + err = i.addNodeOUToCM(instance, cm) + if err != nil { + return err + } + + err = i.Client.CreateOrUpdate(context.TODO(), cm, k8sclient.CreateOrUpdateOption{ + Owner: instance, + Scheme: i.Scheme, + }) + if err != nil { + return errors.Wrap(err, "failed to create Orderer config map") + } + + return nil +} + +func (i *Initializer) 
addOrdererConfigToCM(instance *current.IBPOrderer, cm *corev1.ConfigMap, orderer OrdererConfig) error { + ordererBytes, err := orderer.ToBytes() + if err != nil { + return err + } + cm.BinaryData["orderer.yaml"] = ordererBytes + + return nil +} + +func (i *Initializer) addNodeOUToCM(instance *current.IBPOrderer, cm *corev1.ConfigMap) error { + if !instance.Spec.NodeOUDisabled() { + configFilePath := i.Config.OUFile + // Check if both intermediate ecerts and tlscerts secrets exists + if util.IntermediateSecretExists(i.Client, instance.Namespace, fmt.Sprintf("ecert-%s-intercerts", instance.Name)) && + util.IntermediateSecretExists(i.Client, instance.Namespace, fmt.Sprintf("tls-%s-intercerts", instance.Name)) { + configFilePath = i.Config.InterOUFile + } + ouBytes, err := ioutil.ReadFile(filepath.Clean(configFilePath)) + if err != nil { + return err + } + cm.BinaryData["config.yaml"] = ouBytes + } else { + // Set enabled to false in config + nodeOUConfig, err := config.NodeOUConfigFromBytes(cm.BinaryData["config.yaml"]) + if err != nil { + return err + } + + nodeOUConfig.NodeOUs.Enable = false + ouBytes, err := config.NodeOUConfigToBytes(nodeOUConfig) + if err != nil { + return err + } + + cm.BinaryData["config.yaml"] = ouBytes + } + + return nil +} + +func (i *Initializer) GetConfigFromConfigMap(instance *current.IBPOrderer) (*corev1.ConfigMap, error) { + return common.GetConfigFromConfigMap(i.Client, instance) +} + +func GetDomain(address string) string { + u := strings.Split(address, ":") + return u[0] +} + +func (i *Initializer) GetLabels(instance metav1.Object) map[string]string { + label := os.Getenv("OPERATOR_LABEL_PREFIX") + if label == "" { + label = "fabric" + } + + return map[string]string{ + "app": instance.GetName(), + "app.kubernetes.io/name": label, + "app.kubernetes.io/instance": label + "orderer", + "app.kubernetes.io/managed-by": label + "-operator", + } +} + +func (i *Initializer) CheckIfAdminCertsUpdated(instance *current.IBPOrderer) (bool, error) { + log.Info("Checking if admin certs updated") + current := common.GetAdminCertsFromSecret(i.Client, instance) + updated := common.GetAdminCertsFromSpec(instance.Spec.Secret) + + return common.CheckIfCertsDifferent(current, updated) +} + +func (i *Initializer) UpdateAdminSecret(instance *current.IBPOrderer) error { + return i.SecretManager.UpdateAdminCertSecret(instance, instance.Spec.Secret) +} + +func (i *Initializer) GetCoreConfigFromFile(instance *current.IBPOrderer, file string) (OrdererConfig, error) { + switch version.GetMajorReleaseVersion(instance.Spec.FabricVersion) { + case version.V2: + currentVer := version.String(instance.Spec.FabricVersion) + if currentVer.EqualWithoutTag(version.V2_4_1) || currentVer.GreaterThan(version.V2_4_1) { + log.Info("v2.4.x Fabric Orderer requested") + v24config, err := v24ordererconfig.ReadOrdererFile(file) + if err != nil { + return nil, errors.Wrap(err, "failed to read v2.4.x default config file") + } + return v24config, nil + } else { + log.Info("v2.2.x Fabric Orderer requested") + v2config, err := v2ordererconfig.ReadOrdererFile(file) + if err != nil { + return nil, errors.Wrap(err, "failed to read v2.2.x default config file") + } + return v2config, nil + } + case version.V1: + fallthrough + default: + // Choosing to default to v1.4 to not break backwards comptability, if coming + // from a previous version of operator the 'FabricVersion' field would not be set and would + // result in an error. 
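+		// In practice this means: a FabricVersion at or above 2.4.1 is read with the v2.4.x
+		// config above, any other 2.x version with the v2.2.x config, and a 1.x or unset
+		// version falls back to the v1.4 reader below.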
// TODO: Determine if we want to throw error or handle setting + // FabricVersion as part of migration logic. + log.Info("v1.4 Fabric Orderer requested") + oconfig, err := ordererconfig.ReadOrdererFile(file) + if err != nil { + return nil, errors.Wrap(err, "failed to read v1.4 default config file") + } + return oconfig, nil + } +} + +func (i *Initializer) GetCoreConfigFromBytes(instance *current.IBPOrderer, bytes []byte) (OrdererConfig, error) { + switch version.GetMajorReleaseVersion(instance.Spec.FabricVersion) { + case version.V2: + currentVer := version.String(instance.Spec.FabricVersion) + if currentVer.EqualWithoutTag(version.V2_4_1) || currentVer.GreaterThan(version.V2_4_1) { + log.Info("v2.4.x Fabric Orderer requested") + v24config, err := v24ordererconfig.ReadOrdererFromBytes(bytes) + if err != nil { + return nil, err + } + return v24config, nil + } else { + log.Info("v2.2.x Fabric Orderer requested") + v2config, err := v2ordererconfig.ReadOrdererFromBytes(bytes) + if err != nil { + return nil, err + } + return v2config, nil + } + case version.V1: + fallthrough + default: + // Choosing to default to v1.4 to not break backwards comptability, if coming + // from a previous version of operator the 'FabricVersion' field would not be set and would + // result in an error. + log.Info("v1.4 Fabric Orderer requested") + oconfig, err := ordererconfig.ReadOrdererFromBytes(bytes) + if err != nil { + return nil, err + } + return oconfig, nil + } +} diff --git a/pkg/initializer/orderer/initializer_test.go b/pkg/initializer/orderer/initializer_test.go new file mode 100644 index 00000000..0a036574 --- /dev/null +++ b/pkg/initializer/orderer/initializer_test.go @@ -0,0 +1,220 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package initializer_test + +import ( + "context" + "encoding/base64" + "os" + "path/filepath" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + controllermocks "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + commonconfig "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + commonmocks "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/mocks" + initializer "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + testcert = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNpVENDQWkrZ0F3SUJBZ0lVRkd3N0RjK0QvZUoyY08wOHd6d2tialIzK1M4d0NnWUlLb1pJemowRUF3SXcKYURFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJrd0Z3WURWUVFERXhCbVlXSnlhV010ClkyRXRjMlZ5ZG1WeU1CNFhEVEU1TVRBd09URTBNakF3TUZvWERUSXdNVEF3T0RFME1qQXdNRm93YnpFTE1Ba0cKQTFVRUJoTUNWVk14RnpBVkJnTlZCQWdURGs1dmNuUm9JRU5oY205c2FXNWhNUlF3RWdZRFZRUUtFd3RJZVhCbApjbXhsWkdkbGNqRVBNQTBHQTFVRUN4TUdSbUZpY21sak1TQXdIZ1lEVlFRREV4ZFRZV0ZrY3kxTllXTkNiMjlyCkxWQnlieTVzYjJOaGJEQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJBK0JBRzhZakJvTllabGgKRjFrVHNUbHd6VERDQTJocDhZTXI5Ky8vbEd0NURoSGZVT1c3bkhuSW1USHlPRjJQVjFPcVRuUWhUbWpLYTdaQwpqeU9BUWxLamdhOHdnYXd3RGdZRFZSMFBBUUgvQkFRREFnT29NQjBHQTFVZEpRUVdNQlFHQ0NzR0FRVUZCd01CCkJnZ3JCZ0VGQlFjREFqQU1CZ05WSFJNQkFmOEVBakFBTUIwR0ExVWREZ1FXQkJTbHJjL0lNQkxvMzR0UktvWnEKNTQreDIyYWEyREFmQmdOVkhTTUVHREFXZ0JSWmpxT3RQZWJzSFI2UjBNQUhrNnd4ei85UFZqQXRCZ05WSFJFRQpKakFrZ2hkVFlXRmtjeTFOWVdOQ2IyOXJMVkJ5Ynk1c2IyTmhiSUlKYkc5allXeG9iM04wTUFvR0NDcUdTTTQ5CkJBTUNBMGdBTUVVQ0lRRGR0Y1QwUE9FQXJZKzgwdEhmWUwvcXBiWWoxMGU2eWlPWlpUQ29wY25mUVFJZ1FNQUQKaFc3T0NSUERNd3lqKzNhb015d2hFenFHYy9jRDJSU2V5ekRiRjFFPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==" + testkey = "LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JR0hBZ0VBTUJNR0J5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEJHMHdhd0lCQVFRZ3hRUXdSVFFpVUcwREo1UHoKQTJSclhIUEtCelkxMkxRa0MvbVlveWo1bEhDaFJBTkNBQVN5bE1YLzFqdDlmUGt1RTZ0anpvSTlQbGt4LzZuVQpCMHIvMU56TTdrYnBjUk8zQ3RIeXQ2TXlQR21FOUZUN29pYXphU3J1TW9JTDM0VGdBdUpIOU9ZWQotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg==" +) + +var _ = Describe("Initializing the Orderer", func() { + var ( + ordererInitializer *initializer.Initializer + instance *current.IBPOrderer + mockClient *controllermocks.Client + mockValidator *commonmocks.CryptoValidator + ) + + BeforeEach(func() { + testCertBytes, err := base64.StdEncoding.DecodeString(testcert) + Expect(err).NotTo(HaveOccurred()) + + mockValidator = &commonmocks.CryptoValidator{} + + mockClient = &controllermocks.Client{} + mockClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj client.Object) error { + switch obj.(type) { + case *corev1.Secret: + s := obj.(*corev1.Secret) + s.Data = map[string][]byte{"cert.pem": testCertBytes} + } + return nil + } + + ordererInitializer = initializer.New(mockClient, &runtime.Scheme{}, nil, "", mockValidator) + + enrollment := ¤t.Enrollment{ + CAHost: "localhost", + CAPort: "7054", + EnrollID: "admin", + EnrollSecret: "adminpw", + CATLS: ¤t.CATLS{ + CACert: testcert, + }, + } + tlsenrollment := enrollment.DeepCopy() + + msp := ¤t.MSP{ + KeyStore: testkey, + SignCerts: testcert, + AdminCerts: []string{testcert}, + CACerts: []string{testcert}, + } + tlsmsp := msp.DeepCopy() + + instance = ¤t.IBPOrderer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + Spec: current.IBPOrdererSpec{ + Secret: ¤t.SecretSpec{ + Enrollment: ¤t.EnrollmentSpec{ + Component: enrollment, + TLS: tlsenrollment, + ClientAuth: ¤t.Enrollment{ + CAHost: "host", + CAPort: "1234", + EnrollID: "admin", + EnrollSecret: "adminpw", + CATLS: ¤t.CATLS{ + CACert: "cert", + }, + }, + }, + MSP: ¤t.MSPSpec{ + Component: 
msp, + TLS: tlsmsp, + ClientAuth: ¤t.MSP{ + KeyStore: "key", + SignCerts: "cert", + CACerts: []string{"certs"}, + }, + }, + }, + }, + } + }) + + PContext("create", func() { + // TODO + }) + + PContext("update", func() { + // TODO + }) + + Context("check for missing crypto", func() { + It("returns true, if missing any crypto", func() { + mockValidator.CheckEcertCryptoReturns(errors.New("not found")) + missing := ordererInitializer.MissingCrypto(instance) + Expect(missing).To(Equal(true)) + }) + + It("returns false, if all crypto found and is in proper format", func() { + missing := ordererInitializer.MissingCrypto(instance) + Expect(missing).To(Equal(false)) + }) + }) + + Context("get init orderer", func() { + It("returns empty init peer if neither MSP nor enrollment spec is passed", func() { + instance.Spec.Secret.MSP.TLS = nil + instance.Spec.Secret.Enrollment.TLS = nil + initorderer, err := ordererInitializer.GetInitOrderer(instance, "foo") + Expect(err).NotTo(HaveOccurred()) + Expect(initorderer.Cryptos).NotTo(BeNil()) + Expect(initorderer.Cryptos.TLS).To(BeNil()) + }) + + It("returns init peer with ecert, tls, clientauth enrollers", func() { + initorderer, err := ordererInitializer.GetInitOrderer(instance, "foo") + Expect(err).NotTo(HaveOccurred()) + Expect(initorderer.Cryptos).NotTo(BeNil()) + Expect(initorderer.Cryptos.Enrollment).NotTo(BeNil()) + Expect(initorderer.Cryptos.TLS).NotTo(BeNil()) + Expect(initorderer.Cryptos.ClientAuth).NotTo(BeNil()) + }) + + It("returns init peer with ecert, tls, clientauth msp parsers", func() { + initorderer, err := ordererInitializer.GetInitOrderer(instance, "foo") + Expect(err).NotTo(HaveOccurred()) + Expect(initorderer.Cryptos).NotTo(BeNil()) + Expect(initorderer.Cryptos.Enrollment).NotTo(BeNil()) + Expect(initorderer.Cryptos.TLS).NotTo(BeNil()) + Expect(initorderer.Cryptos.ClientAuth).NotTo(BeNil()) + }) + + It("returns ecert msp parsers and tls enrollers", func() { + instance.Spec.Secret.Enrollment.Component = nil + instance.Spec.Secret.MSP.TLS = nil + initorderer, err := ordererInitializer.GetInitOrderer(instance, "foo") + Expect(err).NotTo(HaveOccurred()) + Expect(initorderer.Cryptos).NotTo(BeNil()) + Expect(initorderer.Cryptos.Enrollment).NotTo(BeNil()) + Expect(initorderer.Cryptos.TLS).NotTo(BeNil()) + }) + }) + + Context("create or update config map", func() { + BeforeEach(func() { + wd, err := os.Getwd() + Expect(err).NotTo(HaveOccurred()) + + ordererInitializer.Config = &initializer.Config{ + OUFile: filepath.Join(wd, "../../../defaultconfig/orderer/ouconfig.yaml"), + InterOUFile: filepath.Join(wd, "../../../defaultconfig/orderer/ouconfig-inter.yaml"), + } + + // Trigger create config map logic + mockClient.GetReturns(k8serrors.NewNotFound(schema.GroupResource{}, "not found")) + }) + + It("returns error if failed to create config map", func() { + mockClient.CreateOrUpdateReturns(errors.New("update error")) + err := ordererInitializer.CreateOrUpdateConfigMap(instance, nil) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("update error")) + }) + + It("creates config map with node ou config", func() { + err := ordererInitializer.CreateOrUpdateConfigMap(instance, nil) + Expect(err).NotTo(HaveOccurred()) + + _, obj, _ := mockClient.CreateOrUpdateArgsForCall(0) + cm := obj.(*corev1.ConfigMap) + Expect(cm.BinaryData["config.yaml"]).NotTo(BeNil()) + nodeOUs, err := commonconfig.NodeOUConfigFromBytes(cm.BinaryData["config.yaml"]) + Expect(nodeOUs.NodeOUs.Enable).To(Equal(true)) + }) + }) + +}) diff --git 
a/pkg/initializer/orderer/mocks/ibporderer.go b/pkg/initializer/orderer/mocks/ibporderer.go new file mode 100644 index 00000000..5b30ae11 --- /dev/null +++ b/pkg/initializer/orderer/mocks/ibporderer.go @@ -0,0 +1,247 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package mocks + +import ( + "sync" + + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + initializer "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer" +) + +type IBPOrderer struct { + GenerateCryptoStub func() (*config.CryptoResponse, error) + generateCryptoMutex sync.RWMutex + generateCryptoArgsForCall []struct { + } + generateCryptoReturns struct { + result1 *config.CryptoResponse + result2 error + } + generateCryptoReturnsOnCall map[int]struct { + result1 *config.CryptoResponse + result2 error + } + GetConfigStub func() initializer.OrdererConfig + getConfigMutex sync.RWMutex + getConfigArgsForCall []struct { + } + getConfigReturns struct { + result1 initializer.OrdererConfig + } + getConfigReturnsOnCall map[int]struct { + result1 initializer.OrdererConfig + } + OverrideConfigStub func(initializer.OrdererConfig) error + overrideConfigMutex sync.RWMutex + overrideConfigArgsForCall []struct { + arg1 initializer.OrdererConfig + } + overrideConfigReturns struct { + result1 error + } + overrideConfigReturnsOnCall map[int]struct { + result1 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *IBPOrderer) GenerateCrypto() (*config.CryptoResponse, error) { + fake.generateCryptoMutex.Lock() + ret, specificReturn := fake.generateCryptoReturnsOnCall[len(fake.generateCryptoArgsForCall)] + fake.generateCryptoArgsForCall = append(fake.generateCryptoArgsForCall, struct { + }{}) + stub := fake.GenerateCryptoStub + fakeReturns := fake.generateCryptoReturns + fake.recordInvocation("GenerateCrypto", []interface{}{}) + fake.generateCryptoMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *IBPOrderer) GenerateCryptoCallCount() int { + fake.generateCryptoMutex.RLock() + defer fake.generateCryptoMutex.RUnlock() + return len(fake.generateCryptoArgsForCall) +} + +func (fake *IBPOrderer) GenerateCryptoCalls(stub func() (*config.CryptoResponse, error)) { + fake.generateCryptoMutex.Lock() + defer fake.generateCryptoMutex.Unlock() + fake.GenerateCryptoStub = stub +} + +func (fake *IBPOrderer) GenerateCryptoReturns(result1 *config.CryptoResponse, result2 error) { + fake.generateCryptoMutex.Lock() + defer fake.generateCryptoMutex.Unlock() + fake.GenerateCryptoStub = nil + fake.generateCryptoReturns = struct { + result1 *config.CryptoResponse + result2 error + }{result1, result2} +} + +func (fake *IBPOrderer) GenerateCryptoReturnsOnCall(i int, result1 *config.CryptoResponse, result2 error) { + fake.generateCryptoMutex.Lock() + defer fake.generateCryptoMutex.Unlock() + fake.GenerateCryptoStub = nil + if fake.generateCryptoReturnsOnCall == nil { + fake.generateCryptoReturnsOnCall = make(map[int]struct { + result1 *config.CryptoResponse + result2 error + }) + } + fake.generateCryptoReturnsOnCall[i] = struct { + result1 *config.CryptoResponse + result2 error + }{result1, result2} +} + +func (fake *IBPOrderer) GetConfig() initializer.OrdererConfig { + fake.getConfigMutex.Lock() + ret, specificReturn := fake.getConfigReturnsOnCall[len(fake.getConfigArgsForCall)] + fake.getConfigArgsForCall = append(fake.getConfigArgsForCall, struct { + }{}) 
+ stub := fake.GetConfigStub + fakeReturns := fake.getConfigReturns + fake.recordInvocation("GetConfig", []interface{}{}) + fake.getConfigMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *IBPOrderer) GetConfigCallCount() int { + fake.getConfigMutex.RLock() + defer fake.getConfigMutex.RUnlock() + return len(fake.getConfigArgsForCall) +} + +func (fake *IBPOrderer) GetConfigCalls(stub func() initializer.OrdererConfig) { + fake.getConfigMutex.Lock() + defer fake.getConfigMutex.Unlock() + fake.GetConfigStub = stub +} + +func (fake *IBPOrderer) GetConfigReturns(result1 initializer.OrdererConfig) { + fake.getConfigMutex.Lock() + defer fake.getConfigMutex.Unlock() + fake.GetConfigStub = nil + fake.getConfigReturns = struct { + result1 initializer.OrdererConfig + }{result1} +} + +func (fake *IBPOrderer) GetConfigReturnsOnCall(i int, result1 initializer.OrdererConfig) { + fake.getConfigMutex.Lock() + defer fake.getConfigMutex.Unlock() + fake.GetConfigStub = nil + if fake.getConfigReturnsOnCall == nil { + fake.getConfigReturnsOnCall = make(map[int]struct { + result1 initializer.OrdererConfig + }) + } + fake.getConfigReturnsOnCall[i] = struct { + result1 initializer.OrdererConfig + }{result1} +} + +func (fake *IBPOrderer) OverrideConfig(arg1 initializer.OrdererConfig) error { + fake.overrideConfigMutex.Lock() + ret, specificReturn := fake.overrideConfigReturnsOnCall[len(fake.overrideConfigArgsForCall)] + fake.overrideConfigArgsForCall = append(fake.overrideConfigArgsForCall, struct { + arg1 initializer.OrdererConfig + }{arg1}) + stub := fake.OverrideConfigStub + fakeReturns := fake.overrideConfigReturns + fake.recordInvocation("OverrideConfig", []interface{}{arg1}) + fake.overrideConfigMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *IBPOrderer) OverrideConfigCallCount() int { + fake.overrideConfigMutex.RLock() + defer fake.overrideConfigMutex.RUnlock() + return len(fake.overrideConfigArgsForCall) +} + +func (fake *IBPOrderer) OverrideConfigCalls(stub func(initializer.OrdererConfig) error) { + fake.overrideConfigMutex.Lock() + defer fake.overrideConfigMutex.Unlock() + fake.OverrideConfigStub = stub +} + +func (fake *IBPOrderer) OverrideConfigArgsForCall(i int) initializer.OrdererConfig { + fake.overrideConfigMutex.RLock() + defer fake.overrideConfigMutex.RUnlock() + argsForCall := fake.overrideConfigArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *IBPOrderer) OverrideConfigReturns(result1 error) { + fake.overrideConfigMutex.Lock() + defer fake.overrideConfigMutex.Unlock() + fake.OverrideConfigStub = nil + fake.overrideConfigReturns = struct { + result1 error + }{result1} +} + +func (fake *IBPOrderer) OverrideConfigReturnsOnCall(i int, result1 error) { + fake.overrideConfigMutex.Lock() + defer fake.overrideConfigMutex.Unlock() + fake.OverrideConfigStub = nil + if fake.overrideConfigReturnsOnCall == nil { + fake.overrideConfigReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.overrideConfigReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *IBPOrderer) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.generateCryptoMutex.RLock() + defer fake.generateCryptoMutex.RUnlock() + fake.getConfigMutex.RLock() + defer fake.getConfigMutex.RUnlock() + fake.overrideConfigMutex.RLock() + defer 
fake.overrideConfigMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *IBPOrderer) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ initializer.IBPOrderer = new(IBPOrderer) diff --git a/pkg/initializer/orderer/orderer.go b/pkg/initializer/orderer/orderer.go new file mode 100644 index 00000000..8dd23534 --- /dev/null +++ b/pkg/initializer/orderer/orderer.go @@ -0,0 +1,73 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package initializer + +import ( + commonapi "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + commonconfig "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + "github.com/pkg/errors" +) + +type OrdererConfig interface { + MergeWith(interface{}, bool) error + ToBytes() ([]byte, error) + UsingPKCS11() bool + SetPKCS11Defaults(bool) + GetBCCSPSection() *commonapi.BCCSP + SetDefaultKeyStore() + SetBCCSPLibrary(string) +} + +type Orderer struct { + Config OrdererConfig + Cryptos *commonconfig.Cryptos + UsingHSMProxy bool +} + +func (o *Orderer) OverrideConfig(newConfig OrdererConfig) (err error) { + if newConfig == nil { + return nil + } + + log.Info("Overriding orderer config values from spec") + err = o.Config.MergeWith(newConfig, o.UsingHSMProxy) + if err != nil { + return errors.Wrapf(err, "failed to merge override configuration") + } + + return nil +} + +func (o *Orderer) GenerateCrypto() (*commonconfig.CryptoResponse, error) { + log.Info("Generating orderer's crypto material") + if o.Cryptos != nil { + response, err := o.Cryptos.GenerateCryptoResponse() + if err != nil { + return nil, err + } + return response, nil + } + + return &config.CryptoResponse{}, nil +} + +func (o *Orderer) GetConfig() OrdererConfig { + return o.Config +} diff --git a/pkg/initializer/orderer/orderer_suite_test.go b/pkg/initializer/orderer/orderer_suite_test.go new file mode 100644 index 00000000..6bb2ed17 --- /dev/null +++ b/pkg/initializer/orderer/orderer_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package initializer_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestOrderer(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Orderer Suite") +} diff --git a/pkg/initializer/peer/config/commoncore/commoncore_suite_test.go b/pkg/initializer/peer/config/commoncore/commoncore_suite_test.go new file mode 100644 index 00000000..aec5ce87 --- /dev/null +++ b/pkg/initializer/peer/config/commoncore/commoncore_suite_test.go @@ -0,0 +1,30 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package commoncore_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestCommoncore(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Commoncore Suite") +} diff --git a/pkg/initializer/peer/config/commoncore/commoncore_test.go b/pkg/initializer/peer/config/commoncore/commoncore_test.go new file mode 100644 index 00000000..2232dacd --- /dev/null +++ b/pkg/initializer/peer/config/commoncore/commoncore_test.go @@ -0,0 +1,257 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package commoncore_test + +import ( + "io/ioutil" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "sigs.k8s.io/yaml" + + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + peerv1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/peer/v1" + peerv2 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/peer/v2" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer/config/commoncore" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + "github.com/IBM-Blockchain/fabric-operator/pkg/util/pointer" +) + +var _ = Describe("Common", func() { + + Context("convert bootstrap from string to string array", func() { + Context("file", func() { + It("converts core file", func() { + bytes, err := ioutil.ReadFile("testdata/test_core.yaml") + Expect(err).NotTo(HaveOccurred()) + + newBytes, err := commoncore.ConvertBootstrapToArray(bytes) + Expect(err).NotTo(HaveOccurred()) + + coreStruct := bytesToCore(newBytes) + + By("converting bootstrap into a string array", func() { + Expect(coreStruct.Peer.Gossip.Bootstrap).To(Equal([]string{"127.0.0.1:7051"})) + }) + + By("persisting the remainder of the struct", func() { + Expect(coreStruct.Chaincode).NotTo(Equal(peerv2.Chaincode{})) + Expect(coreStruct.VM).NotTo(Equal(peerv1.VM{})) + Expect(coreStruct.Ledger).NotTo(Equal(peerv2.Ledger{})) + // Sanity check some of the core components + Expect(coreStruct.Operations).To(Equal(peerv1.Operations{ + ListenAddress: "127.0.0.1:9443", + TLS: peerv1.OperationsTLS{ + Enabled: pointer.False(), + Certificate: peerv1.File{ + File: "cert.pem", + }, + PrivateKey: peerv1.File{ + File: "key.pem", + }, + ClientAuthRequired: pointer.False(), + ClientRootCAs: peerv1.Files{ + Files: []string{"rootcert.pem"}, + }, + }, + })) + Expect(coreStruct.Metrics).To(Equal(peerv1.Metrics{ + Provider: "prometheus", + Statsd: peerv1.Statsd{ + Network: "udp", + Address: "127.0.0.1:8125", + WriteInterval: common.MustParseDuration("10s"), + Prefix: "", + }, + })) + }) + }) + + It("returns config if bootstrap is already []string", func() { + bytes, err := ioutil.ReadFile("testdata/test_core_no_change.yaml") + Expect(err).NotTo(HaveOccurred()) + + newBytes, err := commoncore.ConvertBootstrapToArray(bytes) + Expect(err).NotTo(HaveOccurred()) + + coreStruct := bytesToCore(newBytes) + Expect(coreStruct.Peer.Gossip.Bootstrap).To(Equal([]string{"1.2.3.4"})) + }) + + It("returns config if peer is not present in config", func() { + bytes, err := ioutil.ReadFile("testdata/test_core_no_peer.yaml") + Expect(err).NotTo(HaveOccurred()) + + newBytes, err := commoncore.ConvertBootstrapToArray(bytes) + Expect(err).NotTo(HaveOccurred()) + + coreStruct := bytesToCore(newBytes) + By("not setting anything in core.peer", func() { + Expect(coreStruct.Peer).To(Equal(peerv2.Peer{})) + }) + + By("persisting existing config", func() { + Expect(coreStruct.Chaincode).NotTo(Equal(peerv2.Chaincode{})) + Expect(coreStruct.VM).NotTo(Equal(peerv1.VM{})) + Expect(coreStruct.Ledger).NotTo(Equal(peerv2.Ledger{})) + // Sanity check some of the core components + Expect(coreStruct.Operations).To(Equal(peerv1.Operations{ + ListenAddress: "127.0.0.1:9443", + TLS: peerv1.OperationsTLS{ + Enabled: pointer.False(), + Certificate: peerv1.File{ + File: "cert.pem", + }, + PrivateKey: peerv1.File{ + File: "key.pem", + }, + ClientAuthRequired: pointer.False(), + ClientRootCAs: peerv1.Files{ + Files: []string{"rootcert.pem"}, + }, + }, + })) + Expect(coreStruct.Metrics).To(Equal(peerv1.Metrics{ + Provider: "prometheus", + Statsd: peerv1.Statsd{ + Network: "udp", + Address: "127.0.0.1:8125", + WriteInterval: common.MustParseDuration("10s"), + 
Prefix: "", + }, + })) + }) + + }) + }) + + Context("bytes", func() { + var ( + coreBytes []byte + err error + ) + + BeforeEach(func() { + testCore := &TestCore{ + Peer: Peer{ + Gossip: Gossip{ + Bootstrap: "1.2.3.4", + }, + }, + } + coreBytes, err = yaml.Marshal(testCore) + Expect(err).NotTo(HaveOccurred()) + }) + + It("converts core bytes", func() { + newBytes, err := commoncore.ConvertBootstrapToArray(coreBytes) + Expect(err).NotTo(HaveOccurred()) + + coreStruct := bytesToCore(newBytes) + Expect(coreStruct.Peer.Gossip.Bootstrap).To(Equal([]string{"1.2.3.4"})) + }) + + It("returns same bytes if peer.gossip.bootstrap not found", func() { + core := map[string]interface{}{ + "chaincode": map[string]interface{}{ + "pull": true, + }, + } + bytes, err := yaml.Marshal(core) + Expect(err).NotTo(HaveOccurred()) + + newBytes, err := commoncore.ConvertBootstrapToArray(bytes) + Expect(err).NotTo(HaveOccurred()) + + coreStruct := bytesToCore(newBytes) + trueVal := true + Expect(coreStruct.Peer).To(Equal(peerv2.Peer{})) + Expect(coreStruct.Chaincode).To(Equal(peerv2.Chaincode{ + Pull: &trueVal, + })) + }) + }) + + Context("interface", func() { + var ( + intf interface{} + ) + + BeforeEach(func() { + intf = &TestCore{ + Peer: Peer{ + Gossip: Gossip{ + Bootstrap: "1.2.3.4", + }, + }, + } + }) + + It("converts interface", func() { + newBytes, err := commoncore.ConvertBootstrapToArray(intf) + Expect(err).NotTo(HaveOccurred()) + + coreStruct := bytesToCore(newBytes) + Expect(coreStruct.Peer.Gossip.Bootstrap).To(Equal([]string{"1.2.3.4"})) + }) + + It("returns config if no conversion required", func() { + intf = &TestCore{ + Peer: Peer{ + Gossip: Gossip{}, + }, + } + newBytes, err := commoncore.ConvertBootstrapToArray(intf) + Expect(err).NotTo(HaveOccurred()) + + coreStruct := bytesToCore(newBytes) + Expect(coreStruct.Peer.Gossip).To(Equal(peerv2.Gossip{})) + }) + + It("converts json raw message", func() { + rawMsg, err := util.ConvertToJsonMessage(intf) + Expect(err).NotTo(HaveOccurred()) + newBytes, err := commoncore.ConvertBootstrapToArray(rawMsg) + Expect(err).NotTo(HaveOccurred()) + + coreStruct := bytesToCore(newBytes) + Expect(coreStruct.Peer.Gossip.Bootstrap).To(Equal([]string{"1.2.3.4"})) + }) + }) + + }) +}) + +func bytesToCore(bytes []byte) *peerv2.Core { + coreStruct := &peerv2.Core{} + err := yaml.Unmarshal(bytes, coreStruct) + Expect(err).NotTo(HaveOccurred()) + return coreStruct +} + +type TestCore struct { + Peer Peer `json:"peer"` +} + +type Peer struct { + Gossip Gossip `json:"gossip"` +} + +type Gossip struct { + Bootstrap string `json:"bootstrap"` +} diff --git a/pkg/initializer/peer/config/commoncore/core.go b/pkg/initializer/peer/config/commoncore/core.go new file mode 100644 index 00000000..641540ac --- /dev/null +++ b/pkg/initializer/peer/config/commoncore/core.go @@ -0,0 +1,92 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package commoncore + +import ( + "sigs.k8s.io/yaml" +) + +func ConvertBootstrapToArray(intf interface{}) ([]byte, error) { + if intf == nil { + return nil, nil + } + + if coreBytes, ok := intf.([]byte); ok { + return convertBootstrapToArray(coreBytes) + } + + bytes, err := yaml.Marshal(intf) + if err != nil { + return nil, err + } + + return convertBootstrapToArray(bytes) +} + +// convertBootstrapToArray returns an updated core config where peer.gossip.bootstrap is +// an array of strings ([]string) instead of a string. +// +// Peer.gossip.bootstrap can be passed to the operator as a string or []string in the peer's +// core config; however, the operator defines the field in the Core config struct definition +// as a []string due to how Fabric parses the field +// (https://github.com/hyperledger/fabric/blob/release-1.4/peer/node/start.go#L897). +func convertBootstrapToArray(coreBytes []byte) ([]byte, error) { + if coreBytes == nil { + return nil, nil + } + + type Core map[string]interface{} + + coreObj := Core{} + err := yaml.Unmarshal(coreBytes, &coreObj) + if err != nil { + return nil, err + } + + peer, ok := coreObj["peer"].(map[string]interface{}) + if peer == nil { + // If peer not found, simply return original config + return coreBytes, nil + } + + gossip, ok := peer["gossip"].(map[string]interface{}) + if !ok { + // If peer.gossip not found, simply return original config + return coreBytes, nil + } + + bootstrap, ok := gossip["bootstrap"].(string) + if !ok { + // If peer.gossip.bootstrap not found or unable to be converted + // into a string, simply return original config + return coreBytes, nil + } + + if bootstrap == "" { + gossip["bootstrap"] = nil + } else { + gossip["bootstrap"] = []string{bootstrap} + } + + newCore, err := yaml.Marshal(coreObj) + if err != nil { + return nil, err + } + + return newCore, nil +} diff --git a/pkg/initializer/peer/config/commoncore/testdata/test_core.yaml b/pkg/initializer/peer/config/commoncore/testdata/test_core.yaml new file mode 100644 index 00000000..9baba3c5 --- /dev/null +++ b/pkg/initializer/peer/config/commoncore/testdata/test_core.yaml @@ -0,0 +1,706 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +############################################################################### +# +# Peer section +# +############################################################################### +peer: + + # The peer id provides a name for this peer instance and is used when + # naming docker resources. + id: jdoe + + # The networkId allows for logical separation of networks and is used when + # naming docker resources. + networkId: dev + + # The Address at local network interface this Peer will listen on. + # By default, it will listen on all network interfaces + listenAddress: 0.0.0.0:7051 + + # The endpoint this peer uses to listen for inbound chaincode connections. 
+ # If this is commented-out, the listen address is selected to be + # the peer's address (see below) with port 7052 + chaincodeListenAddress: 0.0.0.0:7052 + + # The endpoint the chaincode for this peer uses to connect to the peer. + # If this is not specified, the chaincodeListenAddress address is selected. + # And if chaincodeListenAddress is not specified, address is selected from + # peer listenAddress. + chaincodeAddress: 0.0.0.0:7053 + + # When used as peer config, this represents the endpoint to other peers + # in the same organization. For peers in other organization, see + # gossip.externalEndpoint for more info. + # When used as CLI config, this means the peer's endpoint to interact with + address: 0.0.0.0:7054 + + # Whether the Peer should programmatically determine its address + # This case is useful for docker containers. + addressAutoDetect: true + + # Keepalive settings for peer server and clients + keepalive: + # Interval is the duration after which if the server does not see + # any activity from the client it pings the client to see if it's alive + interval: 7200s + # Timeout is the duration the server waits for a response + # from the client after sending a ping before closing the connection + timeout: 20s + # MinInterval is the minimum permitted time between client pings. + # If clients send pings more frequently, the peer server will + # disconnect them + minInterval: 60s + # Client keepalive settings for communicating with other peer nodes + client: + # Interval is the time between pings to peer nodes. This must + # greater than or equal to the minInterval specified by peer + # nodes + interval: 60s + # Timeout is the duration the client waits for a response from + # peer nodes before closing the connection + timeout: 20s + # DeliveryClient keepalive settings for communication with ordering + # nodes. + deliveryClient: + # Interval is the time between pings to ordering nodes. This must + # greater than or equal to the minInterval specified by ordering + # nodes. + interval: 60s + # Timeout is the duration the client waits for a response from + # ordering nodes before closing the connection + timeout: 20s + + # Gossip related configuration + gossip: + # Bootstrap set to initialize gossip with. + # This is a list of other peers that this peer reaches out to at startup. + # Important: The endpoints here have to be endpoints of peers in the same + # organization, because the peer would refuse connecting to these endpoints + # unless they are in the same organization as the peer. + bootstrap: 127.0.0.1:7051 + + # NOTE: orgLeader and useLeaderElection parameters are mutual exclusive. + # Setting both to true would result in the termination of the peer + # since this is undefined state. If the peers are configured with + # useLeaderElection=false, make sure there is at least 1 peer in the + # organization that its orgLeader is set to true. + + # Defines whenever peer will initialize dynamic algorithm for + # "leader" selection, where leader is the peer to establish + # connection with ordering service and use delivery protocol + # to pull ledger blocks from ordering service. It is recommended to + # use leader election for large networks of peers. 
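+    # Note that this sample file sets both useLeaderElection and orgLeader to true; it is used
+    # only as parser test input, and per the NOTE above a running peer treats that combination
+    # as an undefined state and terminates.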
+ useLeaderElection: true + # Statically defines peer to be an organization "leader", + # where this means that current peer will maintain connection + # with ordering service and disseminate block across peers in + # its own organization + orgLeader: true + + # Interval for membershipTracker polling + membershipTrackerInterval: 5s + + # Overrides the endpoint that the peer publishes to peers + # in its organization. For peers in foreign organizations + # see 'externalEndpoint' + endpoint: "endpoint1" + # These need to be overridden with the FQDN of the peer + address: "address1" + externaladdress: "externaladdress1" + # Maximum count of blocks stored in memory + maxBlockCountToStore: 10 + # Max time between consecutive message pushes(unit: millisecond) + maxPropagationBurstLatency: 10ms + # Max number of messages stored until a push is triggered to remote peers + maxPropagationBurstSize: 10 + # Number of times a message is pushed to remote peers + propagateIterations: 1 + # Number of peers selected to push messages to + propagatePeerNum: 3 + # Determines frequency of pull phases(unit: second) + # Must be greater than digestWaitTime + responseWaitTime + pullInterval: 4s + # Number of peers to pull from + pullPeerNum: 3 + # Determines frequency of pulling state info messages from peers(unit: second) + requestStateInfoInterval: 4s + # Determines frequency of pushing state info messages to peers(unit: second) + publishStateInfoInterval: 4s + # Maximum time a stateInfo message is kept until expired + stateInfoRetentionInterval: 2s + # Time from startup certificates are included in Alive messages(unit: second) + publishCertPeriod: 10s + # Should we skip verifying block messages or not (currently not in use) + skipBlockVerification: true + # Dial timeout(unit: second) + dialTimeout: 3s + # Connection timeout(unit: second) + connTimeout: 2s + # Buffer size of received messages + recvBuffSize: 20 + # Buffer size of sending messages + sendBuffSize: 200 + # Time to wait before pull engine processes incoming digests (unit: second) + # Should be slightly smaller than requestWaitTime + digestWaitTime: 1s + # Time to wait before pull engine removes incoming nonce (unit: milliseconds) + # Should be slightly bigger than digestWaitTime + requestWaitTime: 1500ms + # Time to wait before pull engine ends pull (unit: second) + responseWaitTime: 2s + # Alive check interval(unit: second) + aliveTimeInterval: 5s + # Alive expiration timeout(unit: second) + aliveExpirationTimeout: 25s + # Reconnect interval(unit: second) + reconnectInterval: 25s + # This is an endpoint that is published to peers outside of the organization. + # If this isn't set, the peer will not be known to other organizations. 
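+        # Example (illustrative only): in a real deployment this would be a
+        # routable address such as peer0.org1.example.com:7051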
+ externalEndpoint: "externalEndpoint1" + # Leader election service configuration + election: + # Longest time peer waits for stable membership during leader election startup (unit: second) + startupGracePeriod: 15s + # Interval gossip membership samples to check its stability (unit: second) + membershipSampleInterval: 1s + # Time passes since last declaration message before peer decides to perform leader election (unit: second) + leaderAliveThreshold: 10s + # Time between peer sends propose message and declares itself as a leader (sends declaration message) (unit: second) + leaderElectionDuration: 5s + + pvtData: + # pullRetryThreshold determines the maximum duration of time private data corresponding for a given block + # would be attempted to be pulled from peers until the block would be committed without the private data + pullRetryThreshold: 60s + # As private data enters the transient store, it is associated with the peer's ledger's height at that time. + # transientstoreMaxBlockRetention defines the maximum difference between the current ledger's height upon commit, + # and the private data residing inside the transient store that is guaranteed not to be purged. + # Private data is purged from the transient store when blocks with sequences that are multiples + # of transientstoreMaxBlockRetention are committed. + transientstoreMaxBlockRetention: 1000 + # pushAckTimeout is the maximum time to wait for an acknowledgement from each peer + # at private data push at endorsement time. + pushAckTimeout: 3s + # Block to live pulling margin, used as a buffer + # to prevent peer from trying to pull private data + # from peers that is soon to be purged in next N blocks. + # This helps a newly joined peer catch up to current + # blockchain height quicker. + btlPullMargin: 10 + # the process of reconciliation is done in an endless loop, while in each iteration reconciler tries to + # pull from the other peers the most recent missing blocks with a maximum batch size limitation. + # reconcileBatchSize determines the maximum batch size of missing private data that will be reconciled in a + # single iteration. + reconcileBatchSize: 10 + # reconcileSleepInterval determines the time reconciler sleeps from end of an iteration until the beginning + # of the next reconciliation iteration. + reconcileSleepInterval: 1m + # reconciliationEnabled is a flag that indicates whether private data reconciliation is enable or not. + reconciliationEnabled: true + # skipPullingInvalidTransactionsDuringCommit is a flag that indicates whether pulling of invalid + # transaction's private data from other peers need to be skipped during the commit time and pulled + # only through reconciler. + skipPullingInvalidTransactionsDuringCommit: false + + # Gossip state transfer related configuration + state: + # indicates whenever state transfer is enabled or not + # default value is true, i.e. state transfer is active + # and takes care to sync up missing blocks allowing + # lagging peer to catch up to speed with rest network + enabled: true + # checkInterval interval to check whether peer is lagging behind enough to + # request blocks via state transfer from another peer. 
+ checkInterval: 10s + # responseTimeout amount of time to wait for state transfer response from + # other peers + responseTimeout: 3s + # batchSize the number of blocks to request via state transfer from another peer + batchSize: 10 + # blockBufferSize reflects the size of the re-ordering buffer + # which captures blocks and takes care to deliver them in order + # down to the ledger layer. The actually buffer size is bounded between + # 0 and 2*blockBufferSize, each channel maintains its own buffer + blockBufferSize: 20 + # maxRetries maximum number of re-tries to ask + # for single state transfer request + maxRetries: 3 + + # TLS Settings + tls: + # Require server-side TLS + enabled: false + # Require client certificates / mutual TLS. + # Note that clients that are not configured to use a certificate will + # fail to connect to the peer. + clientAuthRequired: false + # X.509 certificate used for TLS server + cert: + file: tls/server.crt + # Private key used for TLS server (and client if clientAuthEnabled + # is set to true + key: + file: tls/server.key + # Trusted root certificate chain for tls.cert + rootcert: + file: tls/ca.crt + # Set of root certificate authorities used to verify client certificates + clientRootCAs: + files: + - tls/ca.crt + # Private key used for TLS when making client connections. If + # not set, peer.tls.key.file will be used instead + clientKey: + file: + # X.509 certificate used for TLS when making client connections. + # If not set, peer.tls.cert.file will be used instead + clientCert: + file: + + # Authentication contains configuration parameters related to authenticating + # client messages + authentication: + # the acceptable difference between the current server time and the + # client's time as specified in a client request message + timewindow: 15m + + # Path on the file system where peer will store data (eg ledger). This + # location must be access control protected to prevent unintended + # modification that might corrupt the peer operations. + fileSystemPath: /var/hyperledger/production + + # BCCSP (Blockchain crypto provider): Select which crypto implementation or + # library to use + BCCSP: + Default: SW + # Settings for the SW crypto provider (i.e. when DEFAULT: SW) + SW: + # TODO: The default Hash and Security level needs refactoring to be + # fully configurable. Changing these defaults requires coordination + # SHA2 is hardcoded in several places, not only BCCSP + Hash: SHA2 + Security: 256 + # Location of Key Store + FileKeyStore: + # If "", defaults to 'mspConfigPath'/keystore + KeyStore: "keystore1" + # Settings for the PKCS#11 crypto provider (i.e. when DEFAULT: PKCS11) + PKCS11: + # Location of the PKCS11 module library + Library: "library1" + # Token Label + Label: "label1" + # User PIN + Pin: "1234" + Hash: SHA2 + Security: 256 + FileKeyStore: + KeyStore: "keystore2" + + # Path on the file system where peer will find MSP local configurations + mspConfigPath: msp + + # Identifier of the local MSP + # ----!!!!IMPORTANT!!!-!!!IMPORTANT!!!-!!!IMPORTANT!!!!---- + # Deployers need to change the value of the localMspId string. + # In particular, the name of the local MSP ID of a peer needs + # to match the name of one of the MSPs in each of the channel + # that this peer is a member of. Otherwise this peer's messages + # will not be identified as valid by other nodes. 
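+    # Example (illustrative only): a peer that belongs to Org1 would typically
+    # use localMspId: Org1MSP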
+ localMspId: SampleOrg + + # CLI common client config options + client: + # connection timeout + connTimeout: 3s + + # Delivery service related config + deliveryclient: + # It sets the total time the delivery service may spend in reconnection + # attempts until its retry logic gives up and returns an error + reconnectTotalTimeThreshold: 3600s + + # It sets the delivery service <-> ordering service node connection timeout + connTimeout: 3s + + # It sets the delivery service maximal delay between consecutive retries + reConnectBackoffThreshold: 3600s + + # Type for the local MSP - by default it's of type bccsp + localMspType: bccsp + + # Used with Go profiling tools only in none production environment. In + # production, it should be disabled (eg enabled: false) + profile: + enabled: false + listenAddress: 0.0.0.0:6060 + + # Handlers defines custom handlers that can filter and mutate + # objects passing within the peer, such as: + # Auth filter - reject or forward proposals from clients + # Decorators - append or mutate the chaincode input passed to the chaincode + # Endorsers - Custom signing over proposal response payload and its mutation + # Valid handler definition contains: + # - A name which is a factory method name defined in + # core/handlers/library/library.go for statically compiled handlers + # - library path to shared object binary for pluggable filters + # Auth filters and decorators are chained and executed in the order that + # they are defined. For example: + # authFilters: + # - + # name: FilterOne + # library: /opt/lib/filter.so + # - + # name: FilterTwo + # decorators: + # - + # name: DecoratorOne + # - + # name: DecoratorTwo + # library: /opt/lib/decorator.so + # Endorsers are configured as a map that its keys are the endorsement system chaincodes that are being overridden. + # Below is an example that overrides the default ESCC and uses an endorsement plugin that has the same functionality + # as the default ESCC. + # If the 'library' property is missing, the name is used as the constructor method in the builtin library similar + # to auth filters and decorators. + # endorsers: + # escc: + # name: DefaultESCC + # library: /etc/hyperledger/fabric/plugin/escc.so + handlers: + authFilters: + - + name: DefaultAuth + - + name: ExpirationCheck # This filter checks identity x509 certificate expiration + decorators: + - + name: DefaultDecorator + endorsers: + escc: + name: DefaultEndorsement + library: + validators: + vscc: + name: DefaultValidation + library: + + # library: /etc/hyperledger/fabric/plugin/escc.so + # Number of goroutines that will execute transaction validation in parallel. + # By default, the peer chooses the number of CPUs on the machine. Set this + # variable to override that choice. + # NOTE: overriding this value might negatively influence the performance of + # the peer so please change this value only if you know what you're doing + validatorPoolSize: 5 + + # The discovery service is used by clients to query information about peers, + # such as - which peers have joined a certain channel, what is the latest + # channel config, and most importantly - given a chaincode and a channel, + # what possible sets of peers satisfy the endorsement policy. + discovery: + enabled: true + # Whether the authentication cache is enabled or not. 
+ authCacheEnabled: true + # The maximum size of the cache, after which a purge takes place + authCacheMaxSize: 1000 + # The proportion (0 to 1) of entries that remain in the cache after the cache is purged due to overpopulation + authCachePurgeRetentionRatio: 0.75 + # Whether to allow non-admins to perform non channel scoped queries. + # When this is false, it means that only peer admins can perform non channel scoped queries. + orgMembersAllowedAccess: true + + # Limits is used to configure some internal resource limits. + limits: + # Concurrency limits the number of concurrently running system chaincode requests. + # This option is only supported for qscc at this time. + concurrency: + qscc: 5000 + +############################################################################### +# +# VM section +# +############################################################################### +vm: + + # Endpoint of the vm management system. For docker can be one of the following in general + # unix:///var/run/docker.sock + # http://localhost:2375 + # https://localhost:2376 + endpoint: unix:///var/run/docker.sock + + # settings for docker vms + docker: + tls: + enabled: false + ca: + file: docker/ca.crt + cert: + file: docker/tls.crt + key: + file: docker/tls.key + + # Enables/disables the standard out/err from chaincode containers for + # debugging purposes + attachStdout: false + + # Parameters on creating docker container. + # Container may be efficiently created using ipam & dns-server for cluster + # NetworkMode - sets the networking mode for the container. Supported + # standard values are: `host`(default),`bridge`,`ipvlan`,`none`. + # Dns - a list of DNS servers for the container to use. + # Note: `Privileged` `Binds` `Links` and `PortBindings` properties of + # Docker Host Config are not supported and will not be used if set. + # LogConfig - sets the logging driver (Type) and related options + # (Config) for Docker. For more info, + # https://docs.docker.com/engine/admin/logging/overview/ + # Note: Set LogConfig using Environment Variables is not supported. + hostConfig: + NetworkMode: host + Dns: + # - 192.168.0.1 + # NEVER UNCOMMENT THIS + # LogConfig: + # Type: json-file + # Config: + # max-size: "50m" + # max-file: "5" + Memory: 2147483648 + +############################################################################### +# +# Chaincode section +# +############################################################################### +chaincode: + + # The id is used by the Chaincode stub to register the executing Chaincode + # ID with the Peer and is generally supplied through ENV variables + # the `path` form of ID is provided when installing the chaincode. + # The `name` is used for all other requests and can be any string. + id: + path: + name: + + # Generic builder environment, suitable for most chaincode types + builder: $(DOCKER_NS)/fabric-ccenv:$(PROJECT_VERSION) + + # Enables/disables force pulling of the base docker images (listed below) + # during user chaincode instantiation. + # Useful when using moving image tags (such as :latest) + pull: true + + golang: + # golang will never need more than baseos + runtime: $(DOCKER_NS)/fabric-baseos:$(PROJECT_VERSION) + + # whether or not golang chaincode should be linked dynamically + dynamicLink: false + + java: + # This is an image based on java:openjdk-8 with addition compiler + # tools added for java shim layer packaging. + # This image is packed with shim layer libraries that are necessary + # for Java chaincode runtime. 
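+        # Note: $(DOCKER_NS) and $(PROJECT_VERSION) are expanded by the peer at
+        # runtime; with the default Docker namespace the image below typically
+        # resolves to hyperledger/fabric-javaenv.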
+ runtime: $(DOCKER_NS)/fabric-javaenv:latest + + node: + # This is an image based on node:$(NODE_VER)-alpine + runtime: $(DOCKER_NS)/fabric-nodeenv:latest + + # List of directories to treat as external builders and launchers for + # chaincode. The external builder detection processing will iterate over the + # builders in the order specified below. + externalBuilders: [] + + # Timeout duration for starting up a container and waiting for Register + # to come through. 1sec should be plenty for chaincode unit tests + startuptimeout: 300s + + # Timeout duration for Invoke and Init calls to prevent runaway. + # This timeout is used by all chaincodes in all the channels, including + # system chaincodes. + # Note that during Invoke, if the image is not available (e.g. being + # cleaned up when in development environment), the peer will automatically + # build the image, which might take more time. In production environment, + # the chaincode image is unlikely to be deleted, so the timeout could be + # reduced accordingly. + executetimeout: 30s + + # There are 2 modes: "dev" and "net". + # In dev mode, user runs the chaincode after starting peer from + # command line on local machine. + # In net mode, peer will run chaincode in a docker container. + mode: net + + # keepalive in seconds. In situations where the communiction goes through a + # proxy that does not support keep-alive, this parameter will maintain connection + # between peer and chaincode. + # A value <= 0 turns keepalive off + keepalive: 0 + + # system chaincodes whitelist. To add system chaincode "myscc" to the + # whitelist, add "myscc: enable" to the list below, and register in + # chaincode/importsysccs.go + system: + _lifecycle: enable + cscc: enable + lscc: enable + escc: enable + vscc: enable + qscc: enable + + # Logging section for the chaincode container + logging: + # Default level for all loggers within the chaincode container + level: info + # Override default level for the 'shim' logger + shim: warning + # Format for the chaincode container logs + format: '%{color}%{time:2006-01-02 15:04:05.000 MST} [%{module}] %{shortfunc} -> %{level:.4s} %{id:03x}%{color:reset} %{message}' + +############################################################################### +# +# Ledger section - ledger configuration encompasses both the blockchain +# and the state +# +############################################################################### +ledger: + + blockchain: + + state: + # stateDatabase - options are "goleveldb", "CouchDB" + # goleveldb - default state database stored in goleveldb. + # CouchDB - store state database in CouchDB + stateDatabase: goleveldb + # Limit on the number of records to return per query + totalQueryLimit: 100000 + couchDBConfig: + # It is recommended to run CouchDB on the same server as the peer, and + # not map the CouchDB container port to a server port in docker-compose. + # Otherwise proper security must be provided on the connection between + # CouchDB client (on the peer) and server. + couchDBAddress: 127.0.0.1:5984 + # This username must have read and write authority on CouchDB + username: + # The password is recommended to pass as an environment variable + # during start up (eg CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD). + # If it is stored here, the file must be access control protected + # to prevent unintended users from discovering the password. 
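+       # Example (illustrative only): rather than storing the password here,
+       # export CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD=<couchdb-password>
+       # in the peer's environment, as suggested above.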
+ password: + # Number of retries for CouchDB errors + maxRetries: 3 + # Number of retries for CouchDB errors during peer startup + maxRetriesOnStartup: 12 + # CouchDB request timeout (unit: duration, e.g. 20s) + requestTimeout: 35s + # Limit on the number of records per each CouchDB query + # Note that chaincode queries are only bound by totalQueryLimit. + # Internally the chaincode may execute multiple CouchDB queries, + # each of size internalQueryLimit. + internalQueryLimit: 1000 + # Limit on the number of records per CouchDB bulk update batch + maxBatchUpdateSize: 1000 + # Warm indexes after every N blocks. + # This option warms any indexes that have been + # deployed to CouchDB after every N blocks. + # A value of 1 will warm indexes after every block commit, + # to ensure fast selector queries. + # Increasing the value may improve write efficiency of peer and CouchDB, + # but may degrade query response time. + warmIndexesAfterNBlocks: 1 + # Create the _global_changes system database + # This is optional. Creating the global changes database will require + # additional system resources to track changes and maintain the database + createGlobalChangesDB: false + + history: + # enableHistoryDatabase - options are true or false + # Indicates if the history of key updates should be stored. + # All history 'index' will be stored in goleveldb, regardless if using + # CouchDB or alternate database for the state. + enableHistoryDatabase: true + + pvtdataStore: + # the maximum db batch size for converting + # the ineligible missing data entries to eligible missing data entries + collElgProcMaxDbBatchSize: 5000 + # the minimum duration (in milliseconds) between writing + # two consecutive db batches for converting the ineligible missing data entries to eligible missing data entries + collElgProcDbBatchesInterval: 1000 + +############################################################################### +# +# Operations section +# +############################################################################### +operations: + # host and port for the operations server + listenAddress: 127.0.0.1:9443 + + # TLS configuration for the operations endpoint + tls: + # TLS enabled + enabled: false + + # path to PEM encoded server certificate for the operations server + cert: + file: "cert.pem" + + # path to PEM encoded server key for the operations server + key: + file: "key.pem" + + # most operations service endpoints require client authentication when TLS + # is enabled. clientAuthRequired requires client certificate authentication + # at the TLS layer to access all resources. 
+ clientAuthRequired: false + + # paths to PEM encoded ca certificates to trust for client authentication + clientRootCAs: + files: + - "rootcert.pem" + +############################################################################### +# +# Metrics section +# +############################################################################### +metrics: + # metrics provider is one of statsd, prometheus, or disabled + provider: prometheus + + # statsd configuration + statsd: + # network type: tcp or udp + network: udp + + # statsd server address + address: 127.0.0.1:8125 + + # the interval at which locally cached counters and gauges are pushed + # to statsd; timings are pushed immediately + writeInterval: 10s + + # prefix is prepended to all emitted statsd metrics + prefix: diff --git a/pkg/initializer/peer/config/commoncore/testdata/test_core_no_change.yaml b/pkg/initializer/peer/config/commoncore/testdata/test_core_no_change.yaml new file mode 100644 index 00000000..111a52f5 --- /dev/null +++ b/pkg/initializer/peer/config/commoncore/testdata/test_core_no_change.yaml @@ -0,0 +1,5 @@ + +peer: + gossip: + bootstrap: + - "1.2.3.4" diff --git a/pkg/initializer/peer/config/commoncore/testdata/test_core_no_peer.yaml b/pkg/initializer/peer/config/commoncore/testdata/test_core_no_peer.yaml new file mode 100644 index 00000000..aa843b0f --- /dev/null +++ b/pkg/initializer/peer/config/commoncore/testdata/test_core_no_peer.yaml @@ -0,0 +1,294 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +############################################################################### +# +# Peer section +# +############################################################################### + +############################################################################### +# +# VM section +# +############################################################################### +vm: + + # Endpoint of the vm management system. For docker can be one of the following in general + # unix:///var/run/docker.sock + # http://localhost:2375 + # https://localhost:2376 + endpoint: unix:///var/run/docker.sock + + # settings for docker vms + docker: + tls: + enabled: false + ca: + file: docker/ca.crt + cert: + file: docker/tls.crt + key: + file: docker/tls.key + + # Enables/disables the standard out/err from chaincode containers for + # debugging purposes + attachStdout: false + + # Parameters on creating docker container. + # Container may be efficiently created using ipam & dns-server for cluster + # NetworkMode - sets the networking mode for the container. Supported + # standard values are: `host`(default),`bridge`,`ipvlan`,`none`. + # Dns - a list of DNS servers for the container to use. + # Note: `Privileged` `Binds` `Links` and `PortBindings` properties of + # Docker Host Config are not supported and will not be used if set. + # LogConfig - sets the logging driver (Type) and related options + # (Config) for Docker. 
For more info, + # https://docs.docker.com/engine/admin/logging/overview/ + # Note: Set LogConfig using Environment Variables is not supported. + hostConfig: + NetworkMode: host + Dns: + # - 192.168.0.1 + # NEVER UNCOMMENT THIS + # LogConfig: + # Type: json-file + # Config: + # max-size: "50m" + # max-file: "5" + Memory: 2147483648 + +############################################################################### +# +# Chaincode section +# +############################################################################### +chaincode: + + # The id is used by the Chaincode stub to register the executing Chaincode + # ID with the Peer and is generally supplied through ENV variables + # the `path` form of ID is provided when installing the chaincode. + # The `name` is used for all other requests and can be any string. + id: + path: + name: + + # Generic builder environment, suitable for most chaincode types + builder: $(DOCKER_NS)/fabric-ccenv:$(PROJECT_VERSION) + + # Enables/disables force pulling of the base docker images (listed below) + # during user chaincode instantiation. + # Useful when using moving image tags (such as :latest) + pull: true + + golang: + # golang will never need more than baseos + runtime: $(DOCKER_NS)/fabric-baseos:$(PROJECT_VERSION) + + # whether or not golang chaincode should be linked dynamically + dynamicLink: false + + java: + # This is an image based on java:openjdk-8 with addition compiler + # tools added for java shim layer packaging. + # This image is packed with shim layer libraries that are necessary + # for Java chaincode runtime. + runtime: $(DOCKER_NS)/fabric-javaenv:latest + + node: + # This is an image based on node:$(NODE_VER)-alpine + runtime: $(DOCKER_NS)/fabric-nodeenv:latest + + # List of directories to treat as external builders and launchers for + # chaincode. The external builder detection processing will iterate over the + # builders in the order specified below. + externalBuilders: [] + + # Timeout duration for starting up a container and waiting for Register + # to come through. 1sec should be plenty for chaincode unit tests + startuptimeout: 300s + + # Timeout duration for Invoke and Init calls to prevent runaway. + # This timeout is used by all chaincodes in all the channels, including + # system chaincodes. + # Note that during Invoke, if the image is not available (e.g. being + # cleaned up when in development environment), the peer will automatically + # build the image, which might take more time. In production environment, + # the chaincode image is unlikely to be deleted, so the timeout could be + # reduced accordingly. + executetimeout: 30s + + # There are 2 modes: "dev" and "net". + # In dev mode, user runs the chaincode after starting peer from + # command line on local machine. + # In net mode, peer will run chaincode in a docker container. + mode: net + + # keepalive in seconds. In situations where the communiction goes through a + # proxy that does not support keep-alive, this parameter will maintain connection + # between peer and chaincode. + # A value <= 0 turns keepalive off + keepalive: 0 + + # system chaincodes whitelist. 
To add system chaincode "myscc" to the + # whitelist, add "myscc: enable" to the list below, and register in + # chaincode/importsysccs.go + system: + _lifecycle: enable + cscc: enable + lscc: enable + escc: enable + vscc: enable + qscc: enable + + # Logging section for the chaincode container + logging: + # Default level for all loggers within the chaincode container + level: info + # Override default level for the 'shim' logger + shim: warning + # Format for the chaincode container logs + format: '%{color}%{time:2006-01-02 15:04:05.000 MST} [%{module}] %{shortfunc} -> %{level:.4s} %{id:03x}%{color:reset} %{message}' + +############################################################################### +# +# Ledger section - ledger configuration encompasses both the blockchain +# and the state +# +############################################################################### +ledger: + + blockchain: + + state: + # stateDatabase - options are "goleveldb", "CouchDB" + # goleveldb - default state database stored in goleveldb. + # CouchDB - store state database in CouchDB + stateDatabase: goleveldb + # Limit on the number of records to return per query + totalQueryLimit: 100000 + couchDBConfig: + # It is recommended to run CouchDB on the same server as the peer, and + # not map the CouchDB container port to a server port in docker-compose. + # Otherwise proper security must be provided on the connection between + # CouchDB client (on the peer) and server. + couchDBAddress: 127.0.0.1:5984 + # This username must have read and write authority on CouchDB + username: + # The password is recommended to pass as an environment variable + # during start up (eg CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD). + # If it is stored here, the file must be access control protected + # to prevent unintended users from discovering the password. + password: + # Number of retries for CouchDB errors + maxRetries: 3 + # Number of retries for CouchDB errors during peer startup + maxRetriesOnStartup: 12 + # CouchDB request timeout (unit: duration, e.g. 20s) + requestTimeout: 35s + # Limit on the number of records per each CouchDB query + # Note that chaincode queries are only bound by totalQueryLimit. + # Internally the chaincode may execute multiple CouchDB queries, + # each of size internalQueryLimit. + internalQueryLimit: 1000 + # Limit on the number of records per CouchDB bulk update batch + maxBatchUpdateSize: 1000 + # Warm indexes after every N blocks. + # This option warms any indexes that have been + # deployed to CouchDB after every N blocks. + # A value of 1 will warm indexes after every block commit, + # to ensure fast selector queries. + # Increasing the value may improve write efficiency of peer and CouchDB, + # but may degrade query response time. + warmIndexesAfterNBlocks: 1 + # Create the _global_changes system database + # This is optional. Creating the global changes database will require + # additional system resources to track changes and maintain the database + createGlobalChangesDB: false + + history: + # enableHistoryDatabase - options are true or false + # Indicates if the history of key updates should be stored. + # All history 'index' will be stored in goleveldb, regardless if using + # CouchDB or alternate database for the state. 
+ enableHistoryDatabase: true + + pvtdataStore: + # the maximum db batch size for converting + # the ineligible missing data entries to eligible missing data entries + collElgProcMaxDbBatchSize: 5000 + # the minimum duration (in milliseconds) between writing + # two consecutive db batches for converting the ineligible missing data entries to eligible missing data entries + collElgProcDbBatchesInterval: 1000 + +############################################################################### +# +# Operations section +# +############################################################################### +operations: + # host and port for the operations server + listenAddress: 127.0.0.1:9443 + + # TLS configuration for the operations endpoint + tls: + # TLS enabled + enabled: false + + # path to PEM encoded server certificate for the operations server + cert: + file: "cert.pem" + + # path to PEM encoded server key for the operations server + key: + file: "key.pem" + + # most operations service endpoints require client authentication when TLS + # is enabled. clientAuthRequired requires client certificate authentication + # at the TLS layer to access all resources. + clientAuthRequired: false + + # paths to PEM encoded ca certificates to trust for client authentication + clientRootCAs: + files: + - "rootcert.pem" + +############################################################################### +# +# Metrics section +# +############################################################################### +metrics: + # metrics provider is one of statsd, prometheus, or disabled + provider: prometheus + + # statsd configuration + statsd: + # network type: tcp or udp + network: udp + + # statsd server address + address: 127.0.0.1:8125 + + # the interval at which locally cached counters and gauges are pushed + # to statsd; timings are pushed immediately + writeInterval: 10s + + # prefix is prepended to all emitted statsd metrics + prefix: diff --git a/pkg/initializer/peer/config/v1/config.go b/pkg/initializer/peer/config/v1/config.go new file mode 100644 index 00000000..8d982d6f --- /dev/null +++ b/pkg/initializer/peer/config/v1/config.go @@ -0,0 +1,163 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package v1 + +import ( + "encoding/json" + "io/ioutil" + "path/filepath" + "strings" + + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/peer/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/util/merge" + "github.com/pkg/errors" + "sigs.k8s.io/yaml" +) + +type Core struct { + v1.Core `json:",inline"` + addrOverrides []AddressOverride +} + +func (c *Core) ToBytes() ([]byte, error) { + bytes, err := yaml.Marshal(c) + if err != nil { + return nil, err + } + + return bytes, nil +} + +func (c *Core) WriteToFile(path string) error { + bytes, err := yaml.Marshal(c) + if err != nil { + return err + } + + err = ioutil.WriteFile(filepath.Clean(path), bytes, 0600) + if err != nil { + return err + } + + return nil +} + +func (c *Core) MergeWith(newConfig interface{}, UsingHSMProxy bool) error { + newCore := newConfig.(*Core) + + if newCore != nil { + err := merge.WithOverwrite(c, newCore) + if err != nil { + return errors.Wrapf(err, "failed to merge peer configuration overrides") + } + } + + if c.UsingPKCS11() { + c.SetPKCS11Defaults(UsingHSMProxy) + } + + dc := DeliveryClient{DeliveryClient: c.Peer.DeliveryClient} + addrOverrides, err := dc.HandleCAcertsFiles() + if err != nil { + return errors.Wrapf(err, "failed to convert base64 certs to filepath") + } + c.Peer.DeliveryClient = dc.DeliveryClient + c.addrOverrides = addrOverrides + + return nil +} + +func (c *Core) DeepCopyInto(into *Core) { + b, err := json.Marshal(c) + if err != nil { + return + } + + err = json.Unmarshal(b, into) + if err != nil { + return + } +} + +func (c *Core) DeepCopy() *Core { + if c == nil { + return nil + } + out := new(Core) + c.DeepCopyInto(out) + return out +} + +func (c *Core) UsingPKCS11() bool { + if c.Peer.BCCSP != nil { + if strings.ToLower(c.Peer.BCCSP.ProviderName) == "pkcs11" { + return true + } + } + return false +} + +func (c *Core) SetPKCS11Defaults(usingHSMProxy bool) { + if c.Peer.BCCSP.PKCS11 == nil { + c.Peer.BCCSP.PKCS11 = &common.PKCS11Opts{} + } + + if usingHSMProxy { + c.Peer.BCCSP.PKCS11.Library = "/usr/local/lib/libpkcs11-proxy.so" + } + + if c.Peer.BCCSP.PKCS11.HashFamily == "" { + c.Peer.BCCSP.PKCS11.HashFamily = "SHA2" + } + + if c.Peer.BCCSP.PKCS11.SecLevel == 0 { + c.Peer.BCCSP.PKCS11.SecLevel = 256 + } + + c.Peer.BCCSP.PKCS11.SoftVerify = true +} + +func (c *Core) SetDefaultKeyStore() { + if c.Peer.BCCSP.PKCS11 != nil { + c.Peer.BCCSP.PKCS11.FileKeyStore = &common.FileKeyStoreOpts{ + KeyStorePath: "msp/keystore", + } + } +} + +func (c *Core) GetAddressOverrides() []AddressOverride { + return c.addrOverrides +} + +func (c *Core) GetBCCSPSection() *common.BCCSP { + return c.Peer.BCCSP +} + +func (c *Core) GetMaxNameLength() *int { + return c.MaxNameLength +} + +func (c *Core) SetBCCSPLibrary(library string) { + if c.Peer.BCCSP.PKCS11 == nil { + c.Peer.BCCSP.PKCS11 = &common.PKCS11Opts{} + } + + c.Peer.BCCSP.PKCS11.Library = library +} diff --git a/pkg/initializer/peer/config/v1/config_suite_test.go b/pkg/initializer/peer/config/v1/config_suite_test.go new file mode 100644 index 00000000..7969f062 --- /dev/null +++ b/pkg/initializer/peer/config/v1/config_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestConfig(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Config Suite") +} diff --git a/pkg/initializer/peer/config/v1/config_test.go b/pkg/initializer/peer/config/v1/config_test.go new file mode 100644 index 00000000..d14de050 --- /dev/null +++ b/pkg/initializer/peer/config/v1/config_test.go @@ -0,0 +1,662 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1_test + +import ( + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/peer/v1" + config "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer/config/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/util/pointer" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +const ( + certB64 = "LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb2dJQkFBS0NBUUVBdFJBUDlMemUyZEc1cm1rbmcvdVVtREFZU0VwUElqRFdUUDhqUjMxcUJ5Yjc3YWUrCnk3UTRvRnZod1lDVUhsUWVTWjFKeTdUUHpEcitoUk5hdDJYNGdGYUpGYmVFbC9DSHJ3Rk1mNzNzQStWV1pHdnkKdXhtbjB2bEdYMW5zSEo5aUdIUS9qR2FvV1FJYzlVbnpHWi8yWStlZkpxOWd3cDBNemFzWWZkdXordXVBNlp4VAp5TTdDOWFlWmxYL2ZMYmVkSXVXTzVzaXhPSlZQeUVpcWpkd0RiY1AxYy9mRCtSMm1DbmM3VGovSnVLK1poTGxPCnhGcVlFRmtROHBmSi9LY1pabVF1QURZVFh6RGp6OENxcTRTRU5ySzI0b2hQQkN2SGgyanplWjhGdGR4MmpSSFQKaXdCZWZEYWlSWVBSOUM4enk4K1Z2Wmt6S0hQV3N5aENiNUMrN1FJREFRQUJBb0lCQUZROGhzL2IxdW9Mc3BFOApCdEJXaVVsTWh0K0xBc25yWXFncnd5UU5hdmlzNEdRdXVJdFk2MGRmdCtZb2hjQ2ViZ0RkbG1tWlUxdTJ6cGJtCjdEdUt5MVFaN21rV0dpLytEWUlUM3AxSHBMZ2pTRkFzRUorUFRnN1BQamc2UTZrRlZjUCt3Vm4yb0xmWVRkU28KZE5zbEdxSmNMaVQzVHRMNzhlcjFnTTE5RzN6T3J1ZndrSGJSYU1BRmtvZ1ExUlZLSWpnVGUvbmpIMHFHNW9JagoxNEJLeFFKTUZFTG1pQk50NUx5OVMxWWdxTDRjbmNtUDN5L1QyNEdodVhNckx0eTVOeVhnS0dFZ1pUTDMzZzZvCnYreDFFMFRURWRjMVQvWVBGWkdBSXhHdWRKNWZZZ2JtWU9LZ09mUHZFOE9TbEV6OW56aHNnckVZYjdQVThpZDUKTHFycVJRRUNnWUVBNjIyT3RIUmMxaVY1ZXQxdHQydTVTTTlTS2h2b0lPT3d2Q3NnTEI5dDJzNEhRUlRYN0RXcAo0VDNpUC9leEl5OXI3bTIxNFo5MEgzZlpVNElSUkdHSUxKUVMrYzRQNVA4cHJFTDcyd1dIWlpQTTM3QlZTQ1U3CkxOTXl4TkRjeVdjSUJIVFh4NUY2eXhLNVFXWTg5MVB0eDlDamJFSEcrNVJVdDA4UVlMWDlUQTBDZ1lFQXhPSmYKcXFjeThMOVZyYUFVZG9lbGdIU0NGSkJRR3hMRFNSQlJSTkRIOUJhaWlZOCtwZzd2TExTRXFMRFpsbkZPbFkrQQpiRENEQ0RtdHhwRXViY0x6b3FnOXhlQTZ0eXZZWkNWalY5dXVzNVh1Wmk1VDBBUHhCdm56OHNNa3dRY3RQWkRQCk8zQTN4WllkZzJBRmFrV1BmT1FFbjVaK3F4TU13SG9VZ1ZwQkptRUNnWUJ2Q2FjcTJVOEgrWGpJU0ROOU5TT1kKZ1ovaEdIUnRQcmFXcVVodFJ3MkxDMjFFZHM0NExEOUphdVNSQXdQYThuelhZWXROTk9XU0NmYkllaW9tdEZHRApwUHNtTXRnd1MyQ2VUS0Y0OWF5Y2JnOU0yVi8vdlAraDdxS2RUVjAwNkpGUmVNSms3K3FZYU9aVFFDTTFDN0swCmNXVUNwQ3R6Y014Y0FNQmF2THNRNlFLQmdHbXJMYmxEdjUxaXM3TmFKV0Z3Y0MwL1dzbDZvdVBFOERiNG9RV1UKSUowcXdOV2ZvZm95TGNBS3F1QjIrbkU2SXZrMmFiQ25ZTXc3V0w4b0VJa3NodUtYOVgrTVZ6Y1VPekdVdDNyaQpGeU9mcHJJRXowcm5zcWNSNUJJNUZqTGJqVFpyMEMyUWp2NW5FVFAvaHlpQWFRQ1l5THAyWlVtZ0Vjb0VPNWtwClBhcEJBb0dBZVV0WjE0SVp2cVorQnAxR1VqSG9PR0pQVnlJdzhSRUFETjRhZXRJTUlQRWFVaDdjZUtWdVN6VXMKci9WczA1Zjg0cFBVaStuUTUzaGo2ZFhhYTd1UE1aMFBnNFY4cS9UdzJMZ3BWWndVd0ltZUQrcXNsbldha3VWMQpMSnp3SkhOa3pOWE1OMmJWREFZTndSamNRSmhtbzF0V2xHYlpRQjNoSkEwR2thWGZPa2c9Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==" +) + +var _ = Describe("Peer configuration", func() { + Context("reading and writing peer configuration file", func() { + BeforeEach(func() { + coreConfig := &config.Core{ + Core: v1.Core{ + Peer: v1.Peer{ + ID: "test", + }, + }, + } + + err := coreConfig.WriteToFile("/tmp/core.yaml") + Expect(err).NotTo(HaveOccurred()) + }) + + It("creates core.yaml", func() { + Expect("/tmp/core.yaml").Should(BeAnExistingFile()) + }) + + It("read core.yaml", func() { + core, err := config.ReadCoreFile("/tmp/core.yaml") + Expect(err).NotTo(HaveOccurred()) + Expect(core.Peer.ID).To(Equal("test")) + }) + }) + + It("merges current configuration with overrides values", func() { + core, err := config.ReadCoreFile("../../../../../testdata/init/peer/core.yaml") + Expect(err).NotTo(HaveOccurred()) + Expect(core.Peer.ID).To(Equal("jdoe")) + + newConfig := &config.Core{ + Core: v1.Core{ + Peer: v1.Peer{ + ID: "test", + BCCSP: &common.BCCSP{ + ProviderName: "PKCS11", + PKCS11: &common.PKCS11Opts{ + Library: "library2", + Label: "label2", + Pin: "2222", + HashFamily: "SHA3", + SecLevel: 512, + FileKeyStore: &common.FileKeyStoreOpts{ + KeyStorePath: "keystore3", + }, + }, + }, + Discovery: v1.Discovery{ + Enabled: pointer.False(), + }, + Keepalive: v1.KeepAlive{ + 
MinInterval: common.MustParseDuration("13s"), + }, + DeliveryClient: v1.DeliveryClient{ + AddressOverrides: []v1.AddressOverride{ + v1.AddressOverride{ + From: "old", + To: "new", + CACertsFile: certB64, + }, + }, + }, + }, + }, + } + + Expect(core.Peer.Keepalive.MinInterval).To(Equal(common.MustParseDuration("60s"))) + + err = core.MergeWith(newConfig, true) + Expect(err).NotTo(HaveOccurred()) + Expect(core.Peer.ID).To(Equal("test")) + Expect(core.Peer.BCCSP.PKCS11.Library).To(Equal("/usr/local/lib/libpkcs11-proxy.so")) + Expect(core.Peer.BCCSP.PKCS11.Label).To(Equal("label2")) + Expect(core.Peer.BCCSP.PKCS11.Pin).To(Equal("2222")) + Expect(core.Peer.BCCSP.PKCS11.HashFamily).To(Equal("SHA3")) + Expect(core.Peer.BCCSP.PKCS11.SecLevel).To(Equal(512)) + Expect(core.Peer.BCCSP.PKCS11.FileKeyStore.KeyStorePath).To(Equal("keystore3")) + + Expect(core.Peer.Keepalive.MinInterval).To(Equal(common.MustParseDuration("13s"))) + + Expect(core.Peer.DeliveryClient.AddressOverrides[0].From).To(Equal("old")) + Expect(core.Peer.DeliveryClient.AddressOverrides[0].To).To(Equal("new")) + Expect(core.Peer.DeliveryClient.AddressOverrides[0].CACertsFile).To(Equal("/orderer/certs/cert0.pem")) + + Expect(*core.Peer.Discovery.Enabled).To(Equal(false)) + }) + + It("merges with default values", func() { + core, err := config.ReadCoreFile("../../../../../testdata/init/peer/core.yaml") + Expect(err).NotTo(HaveOccurred()) + Expect(core.Peer.ID).To(Equal("jdoe")) + + newConfig := &config.Core{ + Core: v1.Core{ + Peer: v1.Peer{ + ID: "test", + BCCSP: &common.BCCSP{ + ProviderName: "PKCS11", + PKCS11: &common.PKCS11Opts{ + Label: "label2", + Pin: "2222", + }, + }, + Discovery: v1.Discovery{ + Enabled: pointer.False(), + }, + }, + }, + } + + err = core.MergeWith(newConfig, true) + Expect(err).NotTo(HaveOccurred()) + Expect(core.Peer.ID).To(Equal("test")) + Expect(core.Peer.BCCSP.PKCS11.Library).To(Equal("/usr/local/lib/libpkcs11-proxy.so")) + Expect(core.Peer.BCCSP.PKCS11.Label).To(Equal("label2")) + Expect(core.Peer.BCCSP.PKCS11.Pin).To(Equal("2222")) + Expect(core.Peer.BCCSP.PKCS11.HashFamily).To(Equal("SHA2")) + Expect(core.Peer.BCCSP.PKCS11.SecLevel).To(Equal(256)) + Expect(core.Peer.BCCSP.PKCS11.FileKeyStore.KeyStorePath).To(Equal("keystore2")) + }) + + It("reads in core.yaml and unmarshal it to peer config", func() { + core, err := config.ReadCoreFile("../../../../../testdata/init/peer/core.yaml") + Expect(err).NotTo(HaveOccurred()) + + peerConfig := core.Peer + By("setting ID", func() { + Expect(peerConfig.ID).To(Equal("jdoe")) + }) + + By("setting NetworkID", func() { + Expect(peerConfig.NetworkID).To(Equal("dev")) + }) + + By("setting ListenAddress", func() { + Expect(peerConfig.ListenAddress).To(Equal("0.0.0.0:7051")) + }) + + By("setting ChaincodeListenAddress", func() { + Expect(peerConfig.ChaincodeListenAddress).To(Equal("0.0.0.0:7052")) + }) + + By("setting ChaincodeAddress", func() { + Expect(peerConfig.ChaincodeAddress).To(Equal("0.0.0.0:7053")) + }) + + By("setting Address", func() { + Expect(peerConfig.Address).To(Equal("0.0.0.0:7054")) + }) + + By("setting AddressAutoDetect", func() { + Expect(*peerConfig.AddressAutoDetect).To(Equal(true)) + }) + + By("setting FileSystemPath", func() { + Expect(peerConfig.FileSystemPath).To(Equal("/var/hyperledger/production")) + }) + + By("setting MspConfigPath", func() { + Expect(peerConfig.MspConfigPath).To(Equal("msp")) + }) + + By("setting LocalMspId", func() { + Expect(peerConfig.LocalMspId).To(Equal("SampleOrg")) + }) + + By("setting LocalMspType", func() { + 
Expect(peerConfig.LocalMspType).To(Equal("bccsp")) + }) + + By("setting ValidatorPoolSize", func() { + Expect(peerConfig.ValidatorPoolSize).To(Equal(5)) + }) + // KeepAlive + + By("setting Keepalive.MinInterval", func() { + d, err := common.ParseDuration("60s") + Expect(err).NotTo(HaveOccurred()) + Expect(peerConfig.Keepalive.MinInterval).To(Equal(d)) + }) + + By("setting Keepalive.Client.Interval", func() { + d, err := common.ParseDuration("60s") + Expect(err).NotTo(HaveOccurred()) + Expect(peerConfig.Keepalive.Client.Interval).To(Equal(d)) + }) + + By("setting Keepalive.Client.Timeout", func() { + d, err := common.ParseDuration("20s") + Expect(err).NotTo(HaveOccurred()) + Expect(peerConfig.Keepalive.Client.Timeout).To(Equal(d)) + }) + + By("setting Keepalive.DeliveryClient.Interval", func() { + d, err := common.ParseDuration("60s") + Expect(err).NotTo(HaveOccurred()) + Expect(peerConfig.Keepalive.DeliveryClient.Interval).To(Equal(d)) + }) + + By("setting Keepalive.DeliveryClient.Timeout", func() { + d, err := common.ParseDuration("20s") + Expect(err).NotTo(HaveOccurred()) + Expect(peerConfig.Keepalive.DeliveryClient.Timeout).To(Equal(d)) + }) + + // Gossip + By("setting Gossip.Bootstrap", func() { + Expect(peerConfig.Gossip.Bootstrap).To(Equal([]string{"127.0.0.1:7051", "127.0.0.1:7052"})) + }) + + By("setting Gossip.UseLeaderElection", func() { + Expect(*peerConfig.Gossip.UseLeaderElection).To(Equal(true)) + }) + + By("setting Gossip.OrgLeader", func() { + Expect(*peerConfig.Gossip.OrgLeader).To(Equal(true)) + }) + + By("setting Gossip.MembershipTrackerInterval", func() { + d, err := common.ParseDuration("5s") + Expect(err).NotTo(HaveOccurred()) + Expect(peerConfig.Gossip.MembershipTrackerInterval).To(Equal(d)) + }) + + By("setting Gossip.Endpoint", func() { + Expect(peerConfig.Gossip.Endpoint).To(Equal("endpoint1")) + }) + + By("setting Gossip.MaxBlockCountToStore", func() { + Expect(peerConfig.Gossip.MaxBlockCountToStore).To(Equal(10)) + }) + + By("setting Gossip.MaxPropogationBurstLatency", func() { + d, err := common.ParseDuration("10ms") + Expect(err).NotTo(HaveOccurred()) + Expect(peerConfig.Gossip.MaxPropagationBurstLatency).To(Equal(d)) + }) + + By("setting Gossip.MaxPropogationBurstSize", func() { + Expect(peerConfig.Gossip.MaxPropagationBurstSize).To(Equal(10)) + }) + + By("setting Gossip.PropagateIterations", func() { + Expect(peerConfig.Gossip.PropagateIterations).To(Equal(1)) + }) + + By("setting Gossip.PropagatePeerNum", func() { + Expect(peerConfig.Gossip.PropagatePeerNum).To(Equal(3)) + }) + + By("setting Gossip.PullInterval", func() { + d, err := common.ParseDuration("4s") + Expect(err).NotTo(HaveOccurred()) + Expect(peerConfig.Gossip.PullInterval).To(Equal(d)) + }) + + By("setting Gossip.PullPeerNum", func() { + Expect(peerConfig.Gossip.PullPeerNum).To(Equal(3)) + }) + + By("setting Gossip.RequestStateInfoInterval", func() { + d, err := common.ParseDuration("4s") + Expect(err).NotTo(HaveOccurred()) + Expect(peerConfig.Gossip.RequestStateInfoInterval).To(Equal(d)) + }) + + By("setting Gossip.PublishStateInfoInterval", func() { + d, err := common.ParseDuration("4s") + Expect(err).NotTo(HaveOccurred()) + Expect(peerConfig.Gossip.PublishStateInfoInterval).To(Equal(d)) + }) + + By("setting Gossip.StateInfoRetentionInterval", func() { + d, err := common.ParseDuration("2s") + Expect(err).NotTo(HaveOccurred()) + Expect(peerConfig.Gossip.StateInfoRetentionInterval).To(Equal(d)) + }) + + By("setting Gossip.PublishCertPeriod", func() { + d, err := common.ParseDuration("10s") + 
Expect(err).NotTo(HaveOccurred()) + Expect(peerConfig.Gossip.PublishCertPeriod).To(Equal(d)) + }) + + By("setting Gossip.SkipBlockVerification", func() { + Expect(*peerConfig.Gossip.SkipBlockVerification).To(Equal(true)) + }) + + By("setting Gossip.DialTimeout", func() { + d, err := common.ParseDuration("3s") + Expect(err).NotTo(HaveOccurred()) + Expect(peerConfig.Gossip.DialTimeout).To(Equal(d)) + }) + + By("setting Gossip.ConnTimeout", func() { + d, err := common.ParseDuration("2s") + Expect(err).NotTo(HaveOccurred()) + Expect(peerConfig.Gossip.ConnTimeout).To(Equal(d)) + }) + + By("setting Gossip.RecvBuffSize", func() { + Expect(peerConfig.Gossip.RecvBuffSize).To(Equal(20)) + }) + + By("setting Gossip.SendBuffSize", func() { + Expect(peerConfig.Gossip.SendBuffSize).To(Equal(200)) + }) + + By("setting Gossip.DigestWaitTime", func() { + d, err := common.ParseDuration("1s") + Expect(err).NotTo(HaveOccurred()) + Expect(peerConfig.Gossip.DigestWaitTime).To(Equal(d)) + }) + + By("setting Gossip.RequestWaitTime", func() { + d, err := common.ParseDuration("1500ms") + Expect(err).NotTo(HaveOccurred()) + Expect(peerConfig.Gossip.RequestWaitTime).To(Equal(d)) + }) + + By("setting Gossip.ResponseWaitTime", func() { + d, err := common.ParseDuration("2s") + Expect(err).NotTo(HaveOccurred()) + Expect(peerConfig.Gossip.ResponseWaitTime).To(Equal(d)) + }) + + By("setting Gossip.AliveTimeInterval", func() { + d, err := common.ParseDuration("5s") + Expect(err).NotTo(HaveOccurred()) + Expect(peerConfig.Gossip.AliveTimeInterval).To(Equal(d)) + }) + + By("setting Gossip.AliveExpirationTimeout", func() { + d, err := common.ParseDuration("25s") + Expect(err).NotTo(HaveOccurred()) + Expect(peerConfig.Gossip.AliveExpirationTimeout).To(Equal(d)) + }) + + By("setting Gossip.ReconnectInterval", func() { + d, err := common.ParseDuration("25s") + Expect(err).NotTo(HaveOccurred()) + Expect(peerConfig.Gossip.ReconnectInterval).To(Equal(d)) + }) + + By("setting Gossip.ExternalEndpoint", func() { + Expect(peerConfig.Gossip.ExternalEndpoint).To(Equal("externalEndpoint1")) + }) + + // BCCSP + By("setting BCCSP.ProviderName", func() { + Expect(peerConfig.BCCSP.ProviderName).To(Equal("SW")) + }) + + By("setting BCCSP.SW.HashFamily", func() { + Expect(peerConfig.BCCSP.SW.HashFamily).To(Equal("SHA2")) + }) + + By("setting BCCSP.SW.SecLevel", func() { + Expect(peerConfig.BCCSP.SW.SecLevel).To(Equal(256)) + }) + + By("setting BCCSP.SW.FileKeystore.KeystorePath", func() { + Expect(peerConfig.BCCSP.SW.FileKeyStore.KeyStorePath).To(Equal("keystore1")) + }) + + By("setting BCCSP.PKCS11.Library", func() { + Expect(peerConfig.BCCSP.PKCS11.Library).To(Equal("library1")) + }) + + By("setting BCCSP.PKCS11.Label", func() { + Expect(peerConfig.BCCSP.PKCS11.Label).To(Equal("label1")) + }) + + By("setting BCCSP.PKCS11.Pin", func() { + Expect(peerConfig.BCCSP.PKCS11.Pin).To(Equal("1234")) + }) + + By("setting BCCSP.PKCS11.HashFamily", func() { + Expect(peerConfig.BCCSP.PKCS11.HashFamily).To(Equal("SHA2")) + }) + + By("setting BCCSP.PKCS11.Security", func() { + Expect(peerConfig.BCCSP.PKCS11.SecLevel).To(Equal(256)) + }) + + By("setting BCCSP.PKCS11.FileKeystore.KeystorePath", func() { + Expect(peerConfig.BCCSP.PKCS11.FileKeyStore.KeyStorePath).To(Equal("keystore2")) + }) + + // Discovery + By("setting Discovery.Enabled", func() { + Expect(*peerConfig.Discovery.Enabled).To(Equal(true)) + }) + + By("setting Discovery.AuthCacheEnabled", func() { + Expect(*peerConfig.Discovery.AuthCacheEnabled).To(Equal(true)) + }) + + By("setting 
Discovery.AuthCacheMaxSize", func() { + Expect(peerConfig.Discovery.AuthCacheMaxSize).To(Equal(1000)) + }) + + By("setting Discovery.AuthCachePurgeRetentionRatio", func() { + Expect(peerConfig.Discovery.AuthCachePurgeRetentionRatio).To(Equal(0.75)) + }) + + By("setting Discovery.OrgMembersAllowedAccess", func() { + Expect(*peerConfig.Discovery.OrgMembersAllowedAccess).To(Equal(true)) + }) + + By("setting Limits.Concurrency.Qscc", func() { + Expect(peerConfig.Limits.Concurrency.Qscc).To(Equal(5000)) + }) + + // Handlers + By("setting Handlers.AuthFilters", func() { + Expect(peerConfig.Handlers.AuthFilters).To(Equal([]v1.HandlerConfig{ + v1.HandlerConfig{ + Name: "DefaultAuth", + }, + v1.HandlerConfig{ + Name: "ExpirationCheck", + }, + })) + }) + + By("setting Handlers.Decorators", func() { + Expect(peerConfig.Handlers.Decorators).To(Equal([]v1.HandlerConfig{ + v1.HandlerConfig{ + Name: "DefaultDecorator", + }, + })) + }) + + By("setting Handlers.Endorsers", func() { + Expect(peerConfig.Handlers.Endorsers).To(Equal(v1.PluginMapping{ + "escc": v1.HandlerConfig{ + Name: "DefaultEndorsement", + }, + })) + }) + + By("setting Handlers.Validators", func() { + Expect(peerConfig.Handlers.Validators).To(Equal(v1.PluginMapping{ + "vscc": v1.HandlerConfig{ + Name: "DefaultValidation", + }, + })) + }) + }) + + Context("chaincode configuration", func() { + It("reads in core.yaml and unmarshal it to chaincode config", func() { + core, err := config.ReadCoreFile("../../../../../testdata/init/peer/core.yaml") + Expect(err).NotTo(HaveOccurred()) + + chaincode := core.Chaincode + By("setting Chaincode.StartupTimeout", func() { + d, err := common.ParseDuration("300s") + Expect(err).NotTo(HaveOccurred()) + Expect(chaincode.StartupTimeout).To(Equal(d)) + }) + + By("setting Chaincode.ExecuteTimeout", func() { + d, err := common.ParseDuration("30s") + Expect(err).NotTo(HaveOccurred()) + Expect(chaincode.ExecuteTimeout).To(Equal(d)) + }) + }) + + It("merges current configuration with overrides values", func() { + core, err := config.ReadCoreFile("../../../../../testdata/init/peer/core.yaml") + Expect(err).NotTo(HaveOccurred()) + Expect(core.Peer.ID).To(Equal("jdoe")) + + startupTimeout, err := common.ParseDuration("200s") + Expect(err).NotTo(HaveOccurred()) + executeTimeout, err := common.ParseDuration("20s") + Expect(err).NotTo(HaveOccurred()) + + newConfig := &config.Core{ + Core: v1.Core{ + Chaincode: v1.Chaincode{ + StartupTimeout: startupTimeout, + ExecuteTimeout: executeTimeout, + }, + }, + } + + err = core.MergeWith(newConfig, false) + Expect(err).NotTo(HaveOccurred()) + Expect(core.Chaincode.StartupTimeout).To(Equal(startupTimeout)) + Expect(core.Chaincode.ExecuteTimeout).To(Equal(executeTimeout)) + }) + }) + + Context("DeliveryClient.AddressOverrides", func() { + It("merges current configuration with overrides values", func() { + core, err := config.ReadCoreFile("../../../../../testdata/init/peer/core.yaml") + Expect(err).NotTo(HaveOccurred()) + Expect(core.Peer.ID).To(Equal("jdoe")) + + addressOverrides := []v1.AddressOverride{ + v1.AddressOverride{ + From: "address_old", + To: "address_new", + CACertsFile: certB64, + }, + } + + newConfig := &config.Core{ + Core: v1.Core{ + Peer: v1.Peer{ + DeliveryClient: v1.DeliveryClient{ + AddressOverrides: addressOverrides, + }, + }, + }, + } + + err = core.MergeWith(newConfig, false) + Expect(err).NotTo(HaveOccurred()) + + Expect(core.Peer.DeliveryClient.AddressOverrides[0].From).To(Equal(addressOverrides[0].From)) + 
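+			// The CACertsFile override below is rewritten by MergeWith (via
+			// HandleCAcertsFiles): the inline base64 certificate is replaced with a
+			// generated file path, while the decoded bytes remain available through
+			// GetAddressOverrides()[0].GetCertBytes().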
Expect(core.Peer.DeliveryClient.AddressOverrides[0].To).To(Equal(addressOverrides[0].To)) + Expect(core.Peer.DeliveryClient.AddressOverrides[0].CACertsFile).To(Equal("/orderer/certs/cert0.pem")) + Expect(len(core.GetAddressOverrides()[0].GetCertBytes())).NotTo(Equal(0)) + }) + }) + + Context("operations configuration", func() { + It("merges current configuration with overrides values", func() { + core, err := config.ReadCoreFile("../../../../../testdata/init/peer/core.yaml") + Expect(err).NotTo(HaveOccurred()) + + Expect(core.Operations.ListenAddress).To(Equal("127.0.0.1:9443")) + Expect(*core.Operations.TLS.Enabled).To(Equal(false)) + Expect(core.Operations.TLS.Certificate.File).To(Equal("cert.pem")) + Expect(core.Operations.TLS.PrivateKey.File).To(Equal("key.pem")) + Expect(*core.Operations.TLS.ClientAuthRequired).To(Equal(false)) + Expect(core.Operations.TLS.ClientRootCAs.Files).To(Equal([]string{"rootcert.pem"})) + + newConfig := &config.Core{ + Core: v1.Core{ + Operations: v1.Operations{ + ListenAddress: "localhost:8080", + TLS: v1.OperationsTLS{ + Enabled: pointer.True(), + Certificate: v1.File{ + File: "newcert.pem", + }, + PrivateKey: v1.File{ + File: "newkey.pem", + }, + ClientAuthRequired: pointer.True(), + ClientRootCAs: v1.Files{ + Files: []string{"newrootcert.pem", "newrootcert2.pem"}, + }, + }, + }, + }, + } + + err = core.MergeWith(newConfig, false) + Expect(err).NotTo(HaveOccurred()) + Expect(core.Operations.ListenAddress).To(Equal("localhost:8080")) + Expect(*core.Operations.TLS.Enabled).To(Equal(true)) + Expect(core.Operations.TLS.Certificate.File).To(Equal("newcert.pem")) + Expect(core.Operations.TLS.PrivateKey.File).To(Equal("newkey.pem")) + Expect(*core.Operations.TLS.ClientAuthRequired).To(Equal(true)) + Expect(core.Operations.TLS.ClientRootCAs.Files).To(Equal([]string{"newrootcert.pem", "newrootcert2.pem"})) + }) + }) + + Context("metrics configuration", func() { + It("merges current configuration with overrides values", func() { + core, err := config.ReadCoreFile("../../../../../testdata/init/peer/core.yaml") + Expect(err).NotTo(HaveOccurred()) + + Expect(core.Metrics.Provider).To(Equal("prometheus")) + Expect(core.Metrics.Statsd.Network).To(Equal("udp")) + Expect(core.Metrics.Statsd.Address).To(Equal("127.0.0.1:8125")) + Expect(core.Metrics.Statsd.Prefix).To(Equal("")) + + writeInterval, err := common.ParseDuration("10s") + Expect(err).NotTo(HaveOccurred()) + Expect(core.Metrics.Statsd.WriteInterval).To(Equal(writeInterval)) + + newWriteInterval, err := common.ParseDuration("15s") + Expect(err).NotTo(HaveOccurred()) + newConfig := &config.Core{ + Core: v1.Core{ + Metrics: v1.Metrics{ + Provider: "statsd", + Statsd: v1.Statsd{ + Network: "tcp", + Address: "localhost:8080", + WriteInterval: newWriteInterval, + Prefix: "prefix", + }, + }, + }, + } + + err = core.MergeWith(newConfig, false) + Expect(err).NotTo(HaveOccurred()) + + Expect(core.Metrics.Provider).To(Equal("statsd")) + Expect(core.Metrics.Statsd.Network).To(Equal("tcp")) + Expect(core.Metrics.Statsd.Address).To(Equal("localhost:8080")) + Expect(core.Metrics.Statsd.Prefix).To(Equal("prefix")) + Expect(core.Metrics.Statsd.WriteInterval).To(Equal(newWriteInterval)) + }) + }) + + Context("updating peer.gossip.bootstrap if needed", func() { + It("reads core and converts peer.gossip.bootstrap", func() { + core, err := config.ReadCoreFile("../../../../../testdata/init/peer/core_bootstrap_test.yaml") + Expect(err).NotTo(HaveOccurred()) + Expect(core.Peer.Gossip.Bootstrap).To(Equal([]string{"127.0.0.1:7051"})) + }) 
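// Note on the bootstrap-conversion tests above and below (an illustrative aside; the referenced
// testdata files are not reproduced in this patch): older core.yaml files express
// peer.gossip.bootstrap as a single scalar rather than a string array. When the initial YAML
// unmarshal into the Core struct fails, ReadCoreFile falls back to
// commoncore.ConvertBootstrapToArray and retries (see pkg/initializer/peer/config/v1/io.go
// later in this patch). So a core_bootstrap_test.yaml that, for example, is assumed to contain
//   peer:
//     gossip:
//       bootstrap: 127.0.0.1:7051
// still loads successfully and surfaces as core.Peer.Gossip.Bootstrap == []string{"127.0.0.1:7051"},
// which is what the surrounding expectations assert.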
+ + It("returns error if invalid core (besides peer.gossip.boostrap field)", func() { + _, err := config.ReadCoreFile("../../../../../testdata/init/peer/core_invalid.yaml") + Expect(err).NotTo(HaveOccurred()) + }) + }) +}) diff --git a/pkg/initializer/peer/config/v1/deliveryclient.go b/pkg/initializer/peer/config/v1/deliveryclient.go new file mode 100644 index 00000000..e29720fc --- /dev/null +++ b/pkg/initializer/peer/config/v1/deliveryclient.go @@ -0,0 +1,66 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1 + +import ( + "fmt" + + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/peer/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" +) + +type DeliveryClient struct { + v1.DeliveryClient +} + +type AddressOverride struct { + v1.AddressOverride + certBytes []byte +} + +func (a *AddressOverride) CACertsFileToBytes() ([]byte, error) { + data, err := util.Base64ToBytes(a.CACertsFile) + if err != nil { + return nil, err + } + + return data, nil +} + +func (a *AddressOverride) GetCertBytes() []byte { + return a.certBytes +} + +func (d *DeliveryClient) HandleCAcertsFiles() ([]AddressOverride, error) { + addrOverrides := []AddressOverride{} + + for i, addr := range d.AddressOverrides { + addrOverride := AddressOverride{AddressOverride: addr} + certBytes, err := addrOverride.CACertsFileToBytes() + if err != nil { + return nil, err + } + addrOverride.certBytes = certBytes + addrOverrides = append(addrOverrides, addrOverride) + + d.AddressOverrides[i].CACertsFile = fmt.Sprintf("/orderer/certs/cert%d.pem", i) + } + + return addrOverrides, nil +} diff --git a/pkg/initializer/peer/config/v1/io.go b/pkg/initializer/peer/config/v1/io.go new file mode 100644 index 00000000..19d390da --- /dev/null +++ b/pkg/initializer/peer/config/v1/io.go @@ -0,0 +1,63 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package v1 + +import ( + "io/ioutil" + "path/filepath" + + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer/config/commoncore" + "github.com/pkg/errors" + "sigs.k8s.io/yaml" +) + +func ReadCoreFile(path string) (*Core, error) { + core, err := ioutil.ReadFile(filepath.Clean(path)) + if err != nil { + return nil, err + } + + return coreFromBytes(core) +} + +func ReadFrom(from *[]byte) (*Core, error) { + return coreFromBytes(*from) +} + +func ReadCoreFromBytes(core []byte) (*Core, error) { + return coreFromBytes(core) +} + +func coreFromBytes(coreBytes []byte) (*Core, error) { + coreConfig := &Core{} + err := yaml.Unmarshal(coreBytes, coreConfig) + if err != nil { + // Check if peer.gossip.bootstrap needs to be converted + updatedCore, err := commoncore.ConvertBootstrapToArray(coreBytes) + if err != nil { + return nil, errors.Wrap(err, "failed to convert peer.gossip.bootstrap to string array") + } + err = yaml.Unmarshal(updatedCore, coreConfig) + if err != nil { + return nil, err + } + } + + return coreConfig, nil +} diff --git a/pkg/initializer/peer/config/v2/config.go b/pkg/initializer/peer/config/v2/config.go new file mode 100644 index 00000000..7d66fc2e --- /dev/null +++ b/pkg/initializer/peer/config/v2/config.go @@ -0,0 +1,197 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package v2 + +import ( + "encoding/json" + "io/ioutil" + "path/filepath" + "strings" + + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + v2 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/peer/v2" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer/config/commoncore" + v1config "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer/config/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/util/merge" + "github.com/pkg/errors" + "sigs.k8s.io/yaml" +) + +type Core struct { + v2.Core `json:",inline"` + addrOverrides []v1config.AddressOverride +} + +func (c *Core) ToBytes() ([]byte, error) { + bytes, err := yaml.Marshal(c) + if err != nil { + return nil, err + } + + return bytes, nil +} + +func (c *Core) WriteToFile(path string) error { + bytes, err := yaml.Marshal(c) + if err != nil { + return err + } + + err = ioutil.WriteFile(filepath.Clean(path), bytes, 0600) + if err != nil { + return err + } + + return nil +} + +func (c *Core) MergeWith(newConfig interface{}, usingHSMProxy bool) error { + newCore := newConfig.(*Core) + + if newCore != nil { + err := merge.WithOverwrite(c, newCore) + if err != nil { + return errors.Wrapf(err, "failed to merge peer configuration overrides") + } + } + + if c.UsingPKCS11() { + c.SetPKCS11Defaults(usingHSMProxy) + } + + dc := v1config.DeliveryClient{DeliveryClient: c.Peer.DeliveryClient} + addrOverrides, err := dc.HandleCAcertsFiles() + if err != nil { + return errors.Wrapf(err, "failed to convert base64 certs to filepath") + } + c.Peer.DeliveryClient = dc.DeliveryClient + c.addrOverrides = addrOverrides + + return nil +} + +func (c *Core) DeepCopyInto(into *Core) { + b, err := json.Marshal(c) + if err != nil { + return + } + + err = json.Unmarshal(b, into) + if err != nil { + return + } +} + +func (c *Core) DeepCopy() *Core { + if c == nil { + return nil + } + out := new(Core) + c.DeepCopyInto(out) + return out +} + +func (c *Core) UsingPKCS11() bool { + if c.Peer.BCCSP != nil { + if strings.ToLower(c.Peer.BCCSP.ProviderName) == "pkcs11" { + return true + } + } + return false +} + +func (c *Core) SetPKCS11Defaults(usingHSMProxy bool) { + if c.Peer.BCCSP.PKCS11 == nil { + c.Peer.BCCSP.PKCS11 = &common.PKCS11Opts{} + } + + if usingHSMProxy { + c.Peer.BCCSP.PKCS11.Library = "/usr/local/lib/libpkcs11-proxy.so" + } + + if c.Peer.BCCSP.PKCS11.HashFamily == "" { + c.Peer.BCCSP.PKCS11.HashFamily = "SHA2" + } + + if c.Peer.BCCSP.PKCS11.SecLevel == 0 { + c.Peer.BCCSP.PKCS11.SecLevel = 256 + } + + c.Peer.BCCSP.PKCS11.SoftVerify = true +} + +func (c *Core) SetDefaultKeyStore() { + // No-op + return +} + +func (c *Core) GetMaxNameLength() *int { + return c.MaxNameLength +} + +func (c *Core) GetAddressOverrides() []v1config.AddressOverride { + return c.addrOverrides +} + +func (c *Core) GetBCCSPSection() *common.BCCSP { + return c.Peer.BCCSP +} + +func (c *Core) SetBCCSPLibrary(library string) { + if c.Peer.BCCSP.PKCS11 == nil { + c.Peer.BCCSP.PKCS11 = &common.PKCS11Opts{} + } + + c.Peer.BCCSP.PKCS11.Library = library +} + +func ReadCoreFile(path string) (*Core, error) { + core, err := ioutil.ReadFile(filepath.Clean(path)) + if err != nil { + return nil, err + } + + return coreFromBytes(core) +} + +func ReadCoreFromBytes(core []byte) (*Core, error) { + return coreFromBytes(core) +} + +func ReadFrom(from *[]byte) (*Core, error) { + return coreFromBytes(*from) +} + +func coreFromBytes(coreBytes []byte) (*Core, error) { + coreConfig := &Core{} + err := yaml.Unmarshal(coreBytes, coreConfig) + if err != nil { + // Check if 
peer.gossip.bootstrap needs to be converted + updatedCore, err := commoncore.ConvertBootstrapToArray(coreBytes) + if err != nil { + return nil, errors.Wrap(err, "failed to convert peer.gossip.bootstrap to string array") + } + err = yaml.Unmarshal(updatedCore, coreConfig) + if err != nil { + return nil, err + } + } + + return coreConfig, nil +} diff --git a/pkg/initializer/peer/config/v2/config_test.go b/pkg/initializer/peer/config/v2/config_test.go new file mode 100644 index 00000000..581bd075 --- /dev/null +++ b/pkg/initializer/peer/config/v2/config_test.go @@ -0,0 +1,129 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v2_test + +import ( + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + v2core "github.com/IBM-Blockchain/fabric-operator/pkg/apis/peer/v2" + v2 "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer/config/v2" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("Peer configuration", func() { + It("merges current configuration with overrides values", func() { + core, err := v2.ReadCoreFile("../../../../../testdata/init/peer/core.yaml") + Expect(err).NotTo(HaveOccurred()) + Expect(core.Peer.ID).To(Equal("jdoe")) + + newConfig := &v2.Core{ + Core: v2core.Core{ + Peer: v2core.Peer{ + BCCSP: &common.BCCSP{ + ProviderName: "PKCS11", + PKCS11: &common.PKCS11Opts{ + Library: "library2", + Label: "label2", + Pin: "2222", + HashFamily: "SHA3", + SecLevel: 512, + FileKeyStore: &common.FileKeyStoreOpts{ + KeyStorePath: "keystore3", + }, + }, + }, + }, + }, + } + + Expect(core.Peer.Keepalive.MinInterval).To(Equal(common.MustParseDuration("60s"))) + + err = core.MergeWith(newConfig, true) + Expect(err).NotTo(HaveOccurred()) + + Expect(*core.Peer.BCCSP.PKCS11).To(Equal(common.PKCS11Opts{ + Library: "/usr/local/lib/libpkcs11-proxy.so", + Label: "label2", + Pin: "2222", + HashFamily: "SHA3", + SecLevel: 512, + SoftVerify: true, + FileKeyStore: &common.FileKeyStoreOpts{ + KeyStorePath: "keystore3", + }, + })) + }) + + Context("chaincode configuration", func() { + It("merges v2 current configuration with overrides values", func() { + core, err := v2.ReadCoreFile("../../../../../testdata/init/peer/core.yaml") + Expect(err).NotTo(HaveOccurred()) + Expect(core.Peer.ID).To(Equal("jdoe")) + + startupTimeout, err := common.ParseDuration("200s") + Expect(err).NotTo(HaveOccurred()) + executeTimeout, err := common.ParseDuration("20s") + Expect(err).NotTo(HaveOccurred()) + + newConfig := &v2.Core{ + Core: v2core.Core{ + Chaincode: v2core.Chaincode{ + StartupTimeout: startupTimeout, + ExecuteTimeout: executeTimeout, + ExternalBuilders: []v2core.ExternalBuilder{ + v2core.ExternalBuilder{ + Path: "/scripts", + Name: "go-builder", + EnvironmentWhiteList: []string{"ENV1=Value1"}, + PropogateEnvironment: []string{"ENV1=Value1"}, + }, + }, + }, + }, + } + + err = core.MergeWith(newConfig, false) + 
Expect(err).NotTo(HaveOccurred()) + Expect(core.Chaincode.StartupTimeout).To(Equal(startupTimeout)) + Expect(core.Chaincode.ExecuteTimeout).To(Equal(executeTimeout)) + + Expect(core.Chaincode.ExternalBuilders[0]).To(Equal( + v2core.ExternalBuilder{ + Path: "/scripts", + Name: "go-builder", + EnvironmentWhiteList: []string{"ENV1=Value1"}, + PropogateEnvironment: []string{"ENV1=Value1"}, + }, + )) + }) + }) + + Context("read in core file", func() { + It("reads core and converts peer.gossip.bootstrap", func() { + core, err := v2.ReadCoreFile("../../../../../testdata/init/peer/core_bootstrap_test.yaml") + Expect(err).NotTo(HaveOccurred()) + Expect(core.Peer.Gossip.Bootstrap).To(Equal([]string{"127.0.0.1:7051"})) + }) + + It("returns error if invalid core (besides peer.gossip.bootstrap field)", func() { + _, err := v2.ReadCoreFile("../../../../../testdata/init/peer/core_invalid.yaml") + Expect(err).NotTo(HaveOccurred()) + }) + }) +}) diff --git a/pkg/initializer/peer/config/v2/v2_suite_test.go b/pkg/initializer/peer/config/v2/v2_suite_test.go new file mode 100644 index 00000000..bbd7a82d --- /dev/null +++ b/pkg/initializer/peer/config/v2/v2_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v2_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestV2(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "V2 Suite") +} diff --git a/pkg/initializer/peer/coreconfigmap.go b/pkg/initializer/peer/coreconfigmap.go new file mode 100644 index 00000000..63a5dda4 --- /dev/null +++ b/pkg/initializer/peer/coreconfigmap.go @@ -0,0 +1,218 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package initializer + +import ( + "context" + "fmt" + "io/ioutil" + "path/filepath" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + configv1 "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer/config/v1" + configv2 "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer/config/v2" + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + "github.com/IBM-Blockchain/fabric-operator/version" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +//go:generate counterfeiter -o mocks/client.go -fake-name Client ../../k8s/controllerclient Client + +type CoreConfigMap struct { + Config *Config + Scheme *runtime.Scheme + GetLabels func(instance metav1.Object) map[string]string + Client k8sclient.Client +} + +func (c *CoreConfigMap) GetCoreConfig(instance *current.IBPPeer) (*corev1.ConfigMap, error) { + return common.GetConfigFromConfigMap(c.Client, instance) +} + +func (c *CoreConfigMap) CreateOrUpdate(instance *current.IBPPeer, peer CoreConfig) error { + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-config", instance.GetName()), + Namespace: instance.GetNamespace(), + Labels: c.GetLabels(instance), + }, + BinaryData: map[string][]byte{}, + } + + existing, err := c.GetCoreConfig(instance) + if err != nil { + if !k8serrors.IsNotFound(err) { + return err + } + } + if existing != nil { + cm.BinaryData = existing.BinaryData + } + + peerBytes, err := peer.ToBytes() + if err != nil { + return err + } + cm.BinaryData["core.yaml"] = peerBytes + + err = c.addNodeOU(instance, cm) + if err != nil { + return err + } + + err = c.Client.CreateOrUpdate(context.TODO(), cm, k8sclient.CreateOrUpdateOption{ + Owner: instance, + Scheme: c.Scheme, + }) + if err != nil { + return errors.Wrap(err, "failed to create or update Peer config map") + } + + return nil + +} + +func (c *CoreConfigMap) AddNodeOU(instance *current.IBPPeer) error { + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-config", instance.GetName()), + Namespace: instance.GetNamespace(), + Labels: c.GetLabels(instance), + }, + BinaryData: map[string][]byte{}, + } + + existing, err := c.GetCoreConfig(instance) + if err != nil { + if !k8serrors.IsNotFound(err) { + return err + } + } + if existing != nil { + cm.BinaryData = existing.BinaryData + } + + err = c.addNodeOU(instance, cm) + if err != nil { + return err + } + + err = c.Client.CreateOrUpdate(context.TODO(), cm, k8sclient.CreateOrUpdateOption{ + Owner: instance, + Scheme: c.Scheme, + }) + if err != nil { + return errors.Wrap(err, "failed to create or update Peer config map") + } + + return nil +} + +func (c *CoreConfigMap) addNodeOU(instance *current.IBPPeer, cm *corev1.ConfigMap) error { + if !instance.Spec.NodeOUDisabled() { + configFilePath := c.Config.OUFile + + // Check if both intermediate ecerts and tlscerts exists + if util.IntermediateSecretExists(c.Client, instance.Namespace, fmt.Sprintf("ecert-%s-intercerts", instance.Name)) && + util.IntermediateSecretExists(c.Client, instance.Namespace, fmt.Sprintf("tls-%s-intercerts", instance.Name)) { + configFilePath = c.Config.InterOUFile + } + + ouBytes, err := 
ioutil.ReadFile(filepath.Clean(configFilePath)) + if err != nil { + return errors.Wrapf(err, "failed to read OU config file from '%s'", configFilePath) + } + + cm.BinaryData["config.yaml"] = ouBytes + } else { + // Set enabled to false in config + nodeOUConfig, err := config.NodeOUConfigFromBytes(cm.BinaryData["config.yaml"]) + if err != nil { + return err + } + + nodeOUConfig.NodeOUs.Enable = false + ouBytes, err := config.NodeOUConfigToBytes(nodeOUConfig) + if err != nil { + return err + } + + cm.BinaryData["config.yaml"] = ouBytes + } + + return nil +} + +func GetCoreFromConfigMap(client k8sclient.Client, instance *current.IBPPeer) (*corev1.ConfigMap, error) { + return common.GetConfigFromConfigMap(client, instance) +} + +func GetCoreConfigFromBytes(instance *current.IBPPeer, bytes []byte) (CoreConfig, error) { + switch version.GetMajorReleaseVersion(instance.Spec.FabricVersion) { + case version.V2: + v2config, err := configv2.ReadCoreFromBytes(bytes) + if err != nil { + return nil, err + } + return v2config, nil + case version.V1: + fallthrough + default: + // Choosing to default to v1.4 to not break backwards compatibility; if coming + // from a previous version of the operator, the 'FabricVersion' field would not be set and would + // result in an error. + v1config, err := configv1.ReadCoreFromBytes(bytes) + if err != nil { + return nil, err + } + return v1config, nil + } +} + +func GetCoreConfigFromFile(instance *current.IBPPeer, file string) (CoreConfig, error) { + switch version.GetMajorReleaseVersion(instance.Spec.FabricVersion) { + case version.V2: + log.Info("v2 Fabric Peer requested") + v2config, err := configv2.ReadCoreFile(file) + if err != nil { + return nil, err + } + return v2config, nil + case version.V1: + fallthrough + default: + // Choosing to default to v1.4 to not break backwards compatibility; if coming + // from a previous version of the operator, the 'FabricVersion' field would not be set and would + // result in an error. // TODO: Determine if we want to throw error or handle setting + // FabricVersion as part of migration logic. + log.Info("v1 Fabric Peer requested") + pconfig, err := configv1.ReadCoreFile(file) + if err != nil { + return nil, err + } + return pconfig, nil + } +} diff --git a/pkg/initializer/peer/coreconfigmap_test.go b/pkg/initializer/peer/coreconfigmap_test.go new file mode 100644 index 00000000..c67e8f44 --- /dev/null +++ b/pkg/initializer/peer/coreconfigmap_test.go @@ -0,0 +1,143 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package initializer_test + +import ( + "context" + "fmt" + + . "github.com/onsi/ginkgo" + .
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + initializer "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer" + v2 "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer/config/v2" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer/mocks" + k8sclient "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = Describe("core config map", func() { + var ( + coreCM *initializer.CoreConfigMap + instance *current.IBPPeer + client *mocks.Client + ) + + BeforeEach(func() { + client = &mocks.Client{} + coreCM = &initializer.CoreConfigMap{ + Config: &initializer.Config{ + CorePeerFile: "../../../defaultconfig/peer/core.yaml", + CorePeerV2File: "../../../defaultconfig/peer/v2/core.yaml", + OUFile: "../../../defaultconfig/peer/ouconfig.yaml", + InterOUFile: "../../../defaultconfig/peer/ouconfig-inter.yaml", + }, + Client: client, + GetLabels: func(o metav1.Object) map[string]string { return map[string]string{} }, + } + + instance = ¤t.IBPPeer{} + + client.GetStub = func(ctx context.Context, types types.NamespacedName, obj k8sclient.Object) error { + switch obj.(type) { + case *corev1.ConfigMap: + if types.Name == fmt.Sprintf("%s-config", instance.Name) { + cm := obj.(*corev1.ConfigMap) + cm.BinaryData = map[string][]byte{} + } + } + return nil + } + }) + + Context("get core config", func() { + It("returns config map containing peer's core config", func() { + cm, err := coreCM.GetCoreConfig(instance) + Expect(err).NotTo(HaveOccurred()) + Expect(cm).NotTo(BeNil()) + }) + }) + + Context("create or update config map", func() { + BeforeEach(func() { + client.GetStub = func(ctx context.Context, types types.NamespacedName, obj k8sclient.Object) error { + switch obj.(type) { + case *corev1.ConfigMap: + if types.Name == fmt.Sprintf("%s-config", instance.Name) { + cm := obj.(*corev1.ConfigMap) + cm.BinaryData = map[string][]byte{} + } + } + return nil + } + + }) + + It("adds default configs", func() { + err := coreCM.CreateOrUpdate(instance, &v2.Core{}) + Expect(err).NotTo(HaveOccurred()) + + By("adding node OU config section", func() { + _, obj, _ := client.CreateOrUpdateArgsForCall(0) + + cm := obj.(*corev1.ConfigMap) + Expect(cm.BinaryData["config.yaml"]).To(ContainSubstring("Enable: true")) + }) + }) + }) + + Context("add node ou to config map", func() { + When("nodeoudisabled is set to false", func() { + BeforeEach(func() { + f := false + instance.Spec.DisableNodeOU = &f + }) + + It("adds nodeou configs as enabled", func() { + err := coreCM.AddNodeOU(instance) + Expect(err).NotTo(HaveOccurred()) + + _, obj, _ := client.CreateOrUpdateArgsForCall(0) + + cm := obj.(*corev1.ConfigMap) + Expect(cm.BinaryData["config.yaml"]).To(ContainSubstring("Enable: true")) + }) + }) + + When("nodeoudisabled is set to true", func() { + BeforeEach(func() { + t := true + instance.Spec.DisableNodeOU = &t + }) + + It("adds nodeou configs as disabled", func() { + err := coreCM.AddNodeOU(instance) + Expect(err).NotTo(HaveOccurred()) + + _, obj, _ := client.CreateOrUpdateArgsForCall(0) + + cm := obj.(*corev1.ConfigMap) + Expect(cm.BinaryData["config.yaml"]).To(ContainSubstring("Enable: false")) + }) + }) + }) +}) diff --git a/pkg/initializer/peer/initializer.go b/pkg/initializer/peer/initializer.go new file mode 100644 index 00000000..1f401dbb --- /dev/null +++ b/pkg/initializer/peer/initializer.go @@ -0,0 +1,327 @@ +/* + * Copyright 
contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package initializer + +import ( + "context" + "fmt" + "os" + "path/filepath" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + commonapi "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/enroller" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/secretmanager" + configv1 "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer/config/v1" + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/pkg/errors" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var log = logf.Log.WithName("peer_initializer") + +type Config struct { + OUFile string + InterOUFile string + CorePeerFile string + CorePeerV2File string + DeploymentFile string + PVCFile string + CouchDBPVCFile string + ServiceFile string + RoleFile string + ServiceAccountFile string + RoleBindingFile string + FluentdConfigMapFile string + CouchContainerFile string + CouchInitContainerFile string + IngressFile string + Ingressv1beta1File string + CCLauncherFile string + RouteFile string + StoragePath string +} + +//go:generate counterfeiter -o mocks/ibppeer.go -fake-name IBPPeer . 
IBPPeer + +type IBPPeer interface { + DeliveryClientCrypto() map[string][]byte + OverrideConfig(CoreConfig) error + GenerateCrypto() (*config.CryptoResponse, error) + GetConfig() CoreConfig +} + +type PeerConfig interface { + MergeWith(interface{}, bool) error + GetAddressOverrides() []configv1.AddressOverride + ToBytes() ([]byte, error) + UsingPKCS11() bool + SetPKCS11Defaults(bool) + GetBCCSPSection() *commonapi.BCCSP + GetMaxNameLength() *int + SetDefaultKeyStore() +} + +type Initializer struct { + Config *Config + Scheme *runtime.Scheme + GetLabels func(instance metav1.Object) map[string]string + coreConfigMap *CoreConfigMap + Timeouts enroller.HSMEnrollJobTimeouts + + Client k8sclient.Client + Validator common.CryptoValidator + SecretManager *secretmanager.SecretManager +} + +func New(config *Config, scheme *runtime.Scheme, client k8sclient.Client, labels func(instance metav1.Object) map[string]string, validator common.CryptoValidator, timeouts enroller.HSMEnrollJobTimeouts) *Initializer { + secretManager := secretmanager.New(client, scheme, labels) + + return &Initializer{ + Client: client, + Config: config, + Scheme: scheme, + GetLabels: labels, + Validator: validator, + SecretManager: secretManager, + coreConfigMap: &CoreConfigMap{Config: config, Scheme: scheme, GetLabels: labels, Client: client}, + Timeouts: timeouts, + } +} + +type Response struct { + Config CoreConfig + Crypto *config.CryptoResponse + DeliveryClientCerts map[string][]byte +} + +func (i *Initializer) Create(overrides CoreConfig, peer IBPPeer, storagePath string) (*Response, error) { + var err error + + err = os.RemoveAll(storagePath) + if err != nil { + return nil, err + } + + err = peer.OverrideConfig(overrides) + if err != nil { + return nil, err + } + + cresp, err := peer.GenerateCrypto() + if err != nil { + return nil, err + } + + err = os.RemoveAll(storagePath) + if err != nil { + return nil, err + } + + return &Response{ + Config: peer.GetConfig(), + DeliveryClientCerts: peer.DeliveryClientCrypto(), + Crypto: cresp, + }, nil +} + +func (i *Initializer) CoreConfigMap() *CoreConfigMap { + return i.coreConfigMap +} + +func (i *Initializer) Update(overrides CoreConfig, peer IBPPeer) (*Response, error) { + var err error + + err = peer.OverrideConfig(overrides) + if err != nil { + return nil, err + } + + return &Response{ + Config: peer.GetConfig(), + DeliveryClientCerts: peer.DeliveryClientCrypto(), + }, nil +} + +func (i *Initializer) GetEnrollers(cryptos *config.Cryptos, instance *current.IBPPeer, storagePath string) error { + // If no enrollment information provided, don't need to proceed further + if instance.Spec.Secret == nil || instance.Spec.Secret.Enrollment == nil { + return nil + } + + enrollmentSpec := instance.Spec.Secret.Enrollment + if enrollmentSpec.Component != nil && cryptos.Enrollment == nil { + bytes, err := enrollmentSpec.Component.GetCATLSBytes() + if err != nil { + return err + } + + // Factory will determine if HSM or non-HSM enroller needed and return back appropriate type + cryptos.Enrollment, err = enroller.Factory(enrollmentSpec.Component, i.Client, instance, + filepath.Join(storagePath, "ecert"), + i.Scheme, + bytes, + i.Timeouts, + ) + if err != nil { + return err + } + } + + // Common enrollers get software based enrollers for TLS and clientauth crypto, + // these types are not supported for HSM + err := common.GetCommonEnrollers(cryptos, enrollmentSpec, storagePath) + if err != nil { + return err + } + + return nil +} + +func (i *Initializer) GetMSPCrypto(cryptos *config.Cryptos, 
instance *current.IBPPeer) error { + if instance.Spec.Secret == nil || instance.Spec.Secret.MSP == nil { + return nil + } + + mspSpec := instance.Spec.Secret.MSP + err := common.GetMSPCrypto(cryptos, mspSpec) + if err != nil { + return err + } + + return nil +} + +func (i *Initializer) GetInitPeer(instance *current.IBPPeer, storagePath string) (*Peer, error) { + cryptos := &config.Cryptos{} + + if instance.Spec.Secret != nil { + // Prioritize any crypto passed through MSP spec first + err := i.GetMSPCrypto(cryptos, instance) + if err != nil { + return nil, errors.Wrap(err, "failed to populate init peer with MSP spec") + } + + err = i.GetEnrollers(cryptos, instance, storagePath) + if err != nil { + return nil, errors.Wrap(err, "failed to populate init peer with Enrollment spec") + } + } + + return &Peer{ + Cryptos: cryptos, + }, nil +} + +func (i *Initializer) GetUpdatedPeer(instance *current.IBPPeer) (*Peer, error) { + cryptos := &config.Cryptos{} + + // Only check for any new certs passed through MSP spec + err := i.GetMSPCrypto(cryptos, instance) + if err != nil { + return nil, errors.Wrap(err, "failed to populate updated init peer with MSP spec") + } + + return &Peer{ + Cryptos: cryptos, + }, nil +} + +func (i *Initializer) GenerateSecrets(prefix common.SecretType, instance metav1.Object, crypto *config.Response) error { + if crypto == nil { + return nil + } + return i.SecretManager.GenerateSecrets(prefix, instance, crypto) +} + +func (i *Initializer) GenerateSecretsFromResponse(instance *current.IBPPeer, cryptoResponse *config.CryptoResponse) error { + return i.SecretManager.GenerateSecretsFromResponse(instance, cryptoResponse) +} + +func (i *Initializer) UpdateSecrets(prefix common.SecretType, instance *current.IBPPeer, crypto *config.Response) error { + if crypto == nil { + return nil + } + return i.SecretManager.UpdateSecrets(prefix, instance, crypto) +} + +func (i *Initializer) UpdateSecretsFromResponse(instance *current.IBPPeer, cryptoResponse *config.CryptoResponse) error { + return i.SecretManager.UpdateSecretsFromResponse(instance, cryptoResponse) +} + +func (i *Initializer) GetCrypto(instance *current.IBPPeer) (*config.CryptoResponse, error) { + return i.SecretManager.GetCryptoResponseFromSecrets(instance) +} + +func (i *Initializer) GenerateOrdererCACertsSecret(instance *current.IBPPeer, certs map[string][]byte) error { + secretName := fmt.Sprintf("%s-orderercacerts", instance.GetName()) + err := i.CreateOrUpdateSecret(instance, secretName, certs) + if err != nil { + return err + } + + return nil +} + +func (i *Initializer) MissingCrypto(instance *current.IBPPeer) bool { + if instance.IsHSMEnabled() { + i.Validator.SetHSMEnabled(true) + } + + checkClientAuth := instance.ClientAuthCryptoSet() + err := common.CheckCrypto(i.Validator, instance, checkClientAuth) + if err != nil { + log.Info(err.Error()) + return true + } + + return false +} + +func (i *Initializer) CheckIfAdminCertsUpdated(instance *current.IBPPeer) (bool, error) { + current := common.GetAdminCertsFromSecret(i.Client, instance) + updated := common.GetAdminCertsFromSpec(instance.Spec.Secret) + + return common.CheckIfCertsDifferent(current, updated) +} + +func (i *Initializer) UpdateAdminSecret(instance *current.IBPPeer) error { + return i.SecretManager.UpdateAdminCertSecret(instance, instance.Spec.Secret) +} + +func (i *Initializer) CreateOrUpdateSecret(instance *current.IBPPeer, name string, data map[string][]byte) error { + log.Info(fmt.Sprintf("Creating secret '%s'", name)) + + secret := 
i.SecretManager.BuildSecret(instance, name, data, i.GetLabels(instance)) + err := i.Client.CreateOrUpdate(context.TODO(), secret, k8sclient.CreateOrUpdateOption{ + Owner: instance, + Scheme: i.Scheme, + }) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/initializer/peer/initializer_test.go b/pkg/initializer/peer/initializer_test.go new file mode 100644 index 00000000..4a5bf395 --- /dev/null +++ b/pkg/initializer/peer/initializer_test.go @@ -0,0 +1,496 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package initializer_test + +import ( + "context" + "encoding/base64" + "encoding/pem" + "net/url" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + controllermocks "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + commonconfig "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/enroller" + commonmocks "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/mocks" + peer "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer" + config "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer/config/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer/mocks" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" +) + +const ( + testcert = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNpVENDQWkrZ0F3SUJBZ0lVRkd3N0RjK0QvZUoyY08wOHd6d2tialIzK1M4d0NnWUlLb1pJemowRUF3SXcKYURFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJrd0Z3WURWUVFERXhCbVlXSnlhV010ClkyRXRjMlZ5ZG1WeU1CNFhEVEU1TVRBd09URTBNakF3TUZvWERUSXdNVEF3T0RFME1qQXdNRm93YnpFTE1Ba0cKQTFVRUJoTUNWVk14RnpBVkJnTlZCQWdURGs1dmNuUm9JRU5oY205c2FXNWhNUlF3RWdZRFZRUUtFd3RJZVhCbApjbXhsWkdkbGNqRVBNQTBHQTFVRUN4TUdSbUZpY21sak1TQXdIZ1lEVlFRREV4ZFRZV0ZrY3kxTllXTkNiMjlyCkxWQnlieTVzYjJOaGJEQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJBK0JBRzhZakJvTllabGgKRjFrVHNUbHd6VERDQTJocDhZTXI5Ky8vbEd0NURoSGZVT1c3bkhuSW1USHlPRjJQVjFPcVRuUWhUbWpLYTdaQwpqeU9BUWxLamdhOHdnYXd3RGdZRFZSMFBBUUgvQkFRREFnT29NQjBHQTFVZEpRUVdNQlFHQ0NzR0FRVUZCd01CCkJnZ3JCZ0VGQlFjREFqQU1CZ05WSFJNQkFmOEVBakFBTUIwR0ExVWREZ1FXQkJTbHJjL0lNQkxvMzR0UktvWnEKNTQreDIyYWEyREFmQmdOVkhTTUVHREFXZ0JSWmpxT3RQZWJzSFI2UjBNQUhrNnd4ei85UFZqQXRCZ05WSFJFRQpKakFrZ2hkVFlXRmtjeTFOWVdOQ2IyOXJMVkJ5Ynk1c2IyTmhiSUlKYkc5allXeG9iM04wTUFvR0NDcUdTTTQ5CkJBTUNBMGdBTUVVQ0lRRGR0Y1QwUE9FQXJZKzgwdEhmWUwvcXBiWWoxMGU2eWlPWlpUQ29wY25mUVFJZ1FNQUQKaFc3T0NSUERNd3lqKzNhb015d2hFenFHYy9jRDJSU2V5ekRiRjFFPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==" + testkey = 
"LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JR0hBZ0VBTUJNR0J5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEJHMHdhd0lCQVFRZ3hRUXdSVFFpVUcwREo1UHoKQTJSclhIUEtCelkxMkxRa0MvbVlveWo1bEhDaFJBTkNBQVN5bE1YLzFqdDlmUGt1RTZ0anpvSTlQbGt4LzZuVQpCMHIvMU56TTdrYnBjUk8zQ3RIeXQ2TXlQR21FOUZUN29pYXphU3J1TW9JTDM0VGdBdUpIOU9ZWQotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg==" +) + +var _ = Describe("Initializing the Peer", func() { + var ( + peerinitializer *peer.Initializer + instance *current.IBPPeer + mockClient *controllermocks.Client + mockValidator *commonmocks.CryptoValidator + serverURL string + serverCert string + serverUrlObj *url.URL + ) + + BeforeEach(func() { + serverURL = server.URL + rawCert := server.Certificate().Raw + pemCert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: rawCert}) + serverCert = string(util.BytesToBase64(pemCert)) + + urlObj, err := url.Parse(serverURL) + Expect(err).NotTo(HaveOccurred()) + serverUrlObj = urlObj + + mockClient = &controllermocks.Client{} + mockValidator = &commonmocks.CryptoValidator{} + getLabels := func(instance metav1.Object) map[string]string { + return map[string]string{} + } + peerinitializer = peer.New(nil, &runtime.Scheme{}, mockClient, getLabels, mockValidator, enroller.HSMEnrollJobTimeouts{}) + + instance = ¤t.IBPPeer{ + Spec: current.IBPPeerSpec{ + Secret: ¤t.SecretSpec{ + Enrollment: ¤t.EnrollmentSpec{ + Component: ¤t.Enrollment{ + CAHost: serverUrlObj.Hostname(), + CAPort: serverUrlObj.Port(), + EnrollID: "admin", + EnrollSecret: "adminpw", + CATLS: ¤t.CATLS{ + CACert: serverCert, + }, + AdminCerts: []string{testcert}, + }, + TLS: ¤t.Enrollment{ + CAHost: serverUrlObj.Hostname(), + CAPort: serverUrlObj.Port(), + EnrollID: "admin", + EnrollSecret: "adminpw", + CATLS: ¤t.CATLS{ + CACert: serverCert, + }, + }, + ClientAuth: ¤t.Enrollment{ + CAHost: serverUrlObj.Hostname(), + CAPort: serverUrlObj.Port(), + EnrollID: "admin", + EnrollSecret: "adminpw", + CATLS: ¤t.CATLS{ + CACert: serverCert, + }, + }, + }, + MSP: ¤t.MSPSpec{ + Component: ¤t.MSP{ + KeyStore: "key", + SignCerts: "cert", + CACerts: []string{"certs"}, + AdminCerts: []string{testcert}, + }, + TLS: ¤t.MSP{ + KeyStore: "key", + SignCerts: "cert", + CACerts: []string{"certs"}, + }, + ClientAuth: ¤t.MSP{ + KeyStore: "key", + SignCerts: "cert", + CACerts: []string{"certs"}, + }, + }, + }, + }, + } + }) + + Context("create", func() { + var peer *mocks.IBPPeer + + BeforeEach(func() { + peer = &mocks.IBPPeer{} + }) + + It("returns an error if it fails to override peer's config", func() { + msg := "failed to override" + peer.OverrideConfigReturns(errors.New(msg)) + + _, err := peerinitializer.Create(&config.Core{}, peer, "") + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(msg)) + }) + + It("returns an error if it fails to generate crypto", func() { + msg := "failed to generate crypto" + peer.GenerateCryptoReturns(nil, errors.New(msg)) + + _, err := peerinitializer.Create(&config.Core{}, peer, "") + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(msg)) + }) + + It("creates and returns response containing config and crypto", func() { + _, err := peerinitializer.Create(&config.Core{}, peer, "blah") + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("update", func() { + var peer *mocks.IBPPeer + + BeforeEach(func() { + peer = &mocks.IBPPeer{} + }) + + It("returns an error if it fails to override peer's config", func() { + msg := "failed to override" + peer.OverrideConfigReturns(errors.New(msg)) + + _, err := peerinitializer.Update(&config.Core{}, peer) + 
Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(msg)) + }) + + It("creates and returns response containing config and crypto", func() { + _, err := peerinitializer.Update(&config.Core{}, peer) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("get init peer", func() { + It("returns empty init peer if neither MSP nor enrollment spec is passed", func() { + instance.Spec.Secret.MSP.TLS = nil + instance.Spec.Secret.Enrollment.TLS = nil + initpeer, err := peerinitializer.GetInitPeer(instance, "foo") + Expect(err).NotTo(HaveOccurred()) + Expect(initpeer.Cryptos).NotTo(BeNil()) + Expect(initpeer.Cryptos.TLS).To(BeNil()) + }) + + It("returns init peer with ecert, tls, clientauth enrollers", func() { + initpeer, err := peerinitializer.GetInitPeer(instance, "foo") + Expect(err).NotTo(HaveOccurred()) + Expect(initpeer.Cryptos).NotTo(BeNil()) + Expect(initpeer.Cryptos.Enrollment).NotTo(BeNil()) + Expect(initpeer.Cryptos.TLS).NotTo(BeNil()) + Expect(initpeer.Cryptos.ClientAuth).NotTo(BeNil()) + }) + + It("returns init peer with ecert, tls, clientauth msp parsers", func() { + initpeer, err := peerinitializer.GetInitPeer(instance, "foo") + Expect(err).NotTo(HaveOccurred()) + Expect(initpeer.Cryptos).NotTo(BeNil()) + Expect(initpeer.Cryptos.Enrollment).NotTo(BeNil()) + Expect(initpeer.Cryptos.TLS).NotTo(BeNil()) + Expect(initpeer.Cryptos.ClientAuth).NotTo(BeNil()) + }) + + It("returns ecert msp parsers and tls enrollers", func() { + instance.Spec.Secret.Enrollment.Component = nil + instance.Spec.Secret.MSP.TLS = nil + initpeer, err := peerinitializer.GetInitPeer(instance, "foo") + Expect(err).NotTo(HaveOccurred()) + Expect(initpeer.Cryptos).NotTo(BeNil()) + Expect(initpeer.Cryptos.Enrollment).NotTo(BeNil()) + Expect(initpeer.Cryptos.TLS).NotTo(BeNil()) + }) + }) + + Context("generate secrets", func() { + var ( + resp *commonconfig.Response + ) + + BeforeEach(func() { + resp = &commonconfig.Response{ + CACerts: [][]byte{[]byte("cacert")}, + IntermediateCerts: [][]byte{[]byte("intercert")}, + AdminCerts: [][]byte{[]byte("admincert")}, + SignCert: []byte("signcert"), + Keystore: []byte("key"), + } + }) + + It("returns an error if fails to create a secret", func() { + msg := "admin certs error" + mockClient.CreateOrUpdateReturnsOnCall(0, errors.New(msg)) + + err := peerinitializer.GenerateSecrets("ecert", instance, resp) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to create admin certs secret: " + msg)) + }) + + It("generates", func() { + err := peerinitializer.GenerateSecrets("ecert", instance, resp) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("check for missing crypto", func() { + It("returns true, if missing any crypto", func() { + mockValidator.CheckEcertCryptoReturns(errors.New("not found")) + missing := peerinitializer.MissingCrypto(instance) + Expect(missing).To(Equal(true)) + }) + + It("returns false, if all crypto found and is in proper format", func() { + missing := peerinitializer.MissingCrypto(instance) + Expect(missing).To(Equal(false)) + }) + }) + + Context("check if admin certs need to be updated", func() { + BeforeEach(func() { + instance.Spec.Secret.Enrollment.Component.AdminCerts = []string{testcert} + + testCertBytes, err := base64.StdEncoding.DecodeString(testcert) + Expect(err).NotTo(HaveOccurred()) + + mockClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj client.Object) error { + switch obj.(type) { + case *corev1.Secret: + s := obj.(*corev1.Secret) + s.Data = map[string][]byte{"cert.pem": 
testCertBytes} + } + return nil + } + }) + + It("does not return an error if it fails to find admin secret", func() { + errMsg := "failed to find admin certs secret" + mockClient.GetReturns(errors.New(errMsg)) + _, err := peerinitializer.CheckIfAdminCertsUpdated(instance) + Expect(err).NotTo(HaveOccurred()) + }) + + When("admin certs updated as part of enrollment spec", func() { + BeforeEach(func() { + instance.Spec.Secret.MSP = nil + }) + + It("returns false when the same cert in spec as current admin certs secret", func() { + needUpdating, err := peerinitializer.CheckIfAdminCertsUpdated(instance) + Expect(err).NotTo(HaveOccurred()) + Expect(needUpdating).To(Equal(false)) + }) + + It("returns an error if non-base64 encoded string passed as cert", func() { + instance.Spec.Secret.Enrollment.Component.AdminCerts = []string{"foo"} + _, err := peerinitializer.CheckIfAdminCertsUpdated(instance) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("illegal base64 data")) + }) + + It("returns true when the different cert in spec as current admin certs secret", func() { + instance.Spec.Secret.Enrollment.Component.AdminCerts = []string{testkey} + needUpdating, err := peerinitializer.CheckIfAdminCertsUpdated(instance) + Expect(err).NotTo(HaveOccurred()) + Expect(needUpdating).To(Equal(true)) + }) + }) + + When("admin certs updated as part of MSP spec", func() { + BeforeEach(func() { + instance.Spec.Secret.Enrollment = nil + }) + + It("returns false when the same cert in spec as current admin certs secret", func() { + needUpdating, err := peerinitializer.CheckIfAdminCertsUpdated(instance) + Expect(err).NotTo(HaveOccurred()) + Expect(needUpdating).To(Equal(false)) + }) + + It("returns an error if non-base64 encoded string passed as cert", func() { + instance.Spec.Secret.MSP.Component.AdminCerts = []string{"foo"} + _, err := peerinitializer.CheckIfAdminCertsUpdated(instance) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("illegal base64 data")) + }) + + It("returns true when the different cert in spec as current admin certs secret", func() { + instance.Spec.Secret.MSP.Component.AdminCerts = []string{testkey} + needUpdating, err := peerinitializer.CheckIfAdminCertsUpdated(instance) + Expect(err).NotTo(HaveOccurred()) + Expect(needUpdating).To(Equal(true)) + }) + }) + }) +}) + +// BeforeEach(func() { +// testCertBytes, err := base64.StdEncoding.DecodeString(testcert) +// Expect(err).NotTo(HaveOccurred()) + +// mockClient = &mocks.Client{} +// mockClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj runtime.Object) error { +// switch obj.(type) { +// case *corev1.Secret: +// s := obj.(*corev1.Secret) +// s.Data = map[string][]byte{"cert.pem": testCertBytes} +// } +// return nil +// } + +// resp := &commonconfig.Response{ +// CACerts: [][]byte{[]byte("cacert")}, +// IntermediateCerts: [][]byte{[]byte("intercert")}, +// SignCert: []byte("cert"), +// Keystore: []byte("key"), +// } +// _ = resp + +// peerInitializer = &initializer.Initializer{ +// Client: mockClient, +// } + +// enrollment := &current.Enrollment{ +// CAHost: "localhost", +// CAPort: "7054", +// EnrollID: "admin", +// EnrollSecret: "adminpw", +// CATLS: &current.CATLS{ +// CACert: testcert, +// }, +// } +// tlsenrollment := enrollment.DeepCopy() + +// msp := &current.MSP{ +// KeyStore: testkey, +// SignCerts: testcert, +// AdminCerts: []string{testcert}, +// CACerts: []string{testcert}, +// } +// tlsmsp := msp.DeepCopy() + +// instance = &current.IBPPeer{ +// Spec: current.IBPPeerSpec{ +// Secret:
&current.SecretSpec{ +// Enrollment: &current.EnrollmentSpec{ +// Component: enrollment, +// TLS: tlsenrollment, +// }, +// MSP: &current.MSPSpec{ +// Component: msp, +// TLS: tlsmsp, +// }, +// }, +// }, +// } +// }) + +// Context("check admin certs for existence and proper data", func() { +// It("returns error, if secret not found", func() { +// errMsg := "ecert admincerts secret not found" +// mockClient.GetReturns(errors.New(errMsg)) +// err := peerInitializer.CheckAdminCerts(instance, "ecert") +// Expect(err).To(HaveOccurred()) +// Expect(err.Error()).To(Equal(errMsg)) +// }) + +// It("returns error, if secrets found but contains no data", func() { +// mockClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj runtime.Object) error { +// switch obj.(type) { +// case *corev1.Secret: +// s := obj.(*corev1.Secret) +// s.Data = nil +// } +// return nil +// } +// err := peerInitializer.CheckAdminCerts(instance, "ecert") +// Expect(err).To(HaveOccurred()) +// Expect(err.Error()).To(Equal("no admin certificates found in admincerts secret")) +// }) + +// It("returns error, if secrets found but contains bad data", func() { +// mockClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj runtime.Object) error { +// switch obj.(type) { +// case *corev1.Secret: +// s := obj.(*corev1.Secret) +// s.Data = map[string][]byte{"cert.pem": []byte("foo")} +// } +// return nil +// } +// err := peerInitializer.CheckAdminCerts(instance, "ecert") +// Expect(err).To(HaveOccurred()) +// Expect(err.Error()).To(Equal("not a proper admin cert: failed to get certificate block")) +// }) + +// It("returns no error, if secret found and contains proper data", func() { +// err := peerInitializer.CheckAdminCerts(instance, "ecert") +// Expect(err).NotTo(HaveOccurred()) +// }) +// }) + +// Context("check ca certs for existence and proper data", func() { +// It("returns error, if secret not found", func() { +// errMsg := "ecert cacerts secret not found" +// mockClient.GetReturns(errors.New(errMsg)) +// err := peerInitializer.CheckCACerts(instance, "ecert") +// Expect(err).To(HaveOccurred()) +// Expect(err.Error()).To(Equal(errMsg)) +// }) + +// It("returns error, if secrets found but contains no data", func() { +// mockClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj runtime.Object) error { +// switch obj.(type) { +// case *corev1.Secret: +// s := obj.(*corev1.Secret) +// s.Data = nil +// } +// return nil +// } +// err := peerInitializer.CheckCACerts(instance, "ecert") +// Expect(err).To(HaveOccurred()) +// Expect(err.Error()).To(Equal("no ca certificates found in cacerts secret")) +// }) + +// It("returns error, if secrets found but contains bad data", func() { +// mockClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj runtime.Object) error { +// switch obj.(type) { +// case *corev1.Secret: +// s := obj.(*corev1.Secret) +// s.Data = map[string][]byte{"cert.pem": []byte("foo")} +// } +// return nil +// } +// err := peerInitializer.CheckCACerts(instance, "ecert") +// Expect(err).To(HaveOccurred()) +// Expect(err.Error()).To(Equal("not a proper ca cert: failed to get certificate block")) +// }) + +// It("returns no error, if secret found and contains proper data", func() { +// err := peerInitializer.CheckCACerts(instance, "ecert") +// Expect(err).NotTo(HaveOccurred()) +// }) +// }) + +// }) diff --git a/pkg/initializer/peer/mocks/client.go b/pkg/initializer/peer/mocks/client.go new file mode 100644 index 00000000..ee14505d --- /dev/null +++
b/pkg/initializer/peer/mocks/client.go @@ -0,0 +1,746 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package mocks + +import ( + "context" + "sync" + + "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type Client struct { + CreateStub func(context.Context, client.Object, ...controllerclient.CreateOption) error + createMutex sync.RWMutex + createArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 []controllerclient.CreateOption + } + createReturns struct { + result1 error + } + createReturnsOnCall map[int]struct { + result1 error + } + CreateOrUpdateStub func(context.Context, client.Object, ...controllerclient.CreateOrUpdateOption) error + createOrUpdateMutex sync.RWMutex + createOrUpdateArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 []controllerclient.CreateOrUpdateOption + } + createOrUpdateReturns struct { + result1 error + } + createOrUpdateReturnsOnCall map[int]struct { + result1 error + } + DeleteStub func(context.Context, client.Object, ...client.DeleteOption) error + deleteMutex sync.RWMutex + deleteArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 []client.DeleteOption + } + deleteReturns struct { + result1 error + } + deleteReturnsOnCall map[int]struct { + result1 error + } + GetStub func(context.Context, types.NamespacedName, client.Object) error + getMutex sync.RWMutex + getArgsForCall []struct { + arg1 context.Context + arg2 types.NamespacedName + arg3 client.Object + } + getReturns struct { + result1 error + } + getReturnsOnCall map[int]struct { + result1 error + } + ListStub func(context.Context, client.ObjectList, ...client.ListOption) error + listMutex sync.RWMutex + listArgsForCall []struct { + arg1 context.Context + arg2 client.ObjectList + arg3 []client.ListOption + } + listReturns struct { + result1 error + } + listReturnsOnCall map[int]struct { + result1 error + } + PatchStub func(context.Context, client.Object, client.Patch, ...controllerclient.PatchOption) error + patchMutex sync.RWMutex + patchArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 client.Patch + arg4 []controllerclient.PatchOption + } + patchReturns struct { + result1 error + } + patchReturnsOnCall map[int]struct { + result1 error + } + PatchStatusStub func(context.Context, client.Object, client.Patch, ...controllerclient.PatchOption) error + patchStatusMutex sync.RWMutex + patchStatusArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 client.Patch + arg4 []controllerclient.PatchOption + } + patchStatusReturns struct { + result1 error + } + patchStatusReturnsOnCall map[int]struct { + result1 error + } + UpdateStub func(context.Context, client.Object, ...controllerclient.UpdateOption) error + updateMutex sync.RWMutex + updateArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 []controllerclient.UpdateOption + } + updateReturns struct { + result1 error + } + updateReturnsOnCall map[int]struct { + result1 error + } + UpdateStatusStub func(context.Context, client.Object, ...client.UpdateOption) error + updateStatusMutex sync.RWMutex + updateStatusArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 []client.UpdateOption + } + updateStatusReturns struct { + result1 error + } + updateStatusReturnsOnCall map[int]struct { + result1 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *Client) 
Create(arg1 context.Context, arg2 client.Object, arg3 ...controllerclient.CreateOption) error { + fake.createMutex.Lock() + ret, specificReturn := fake.createReturnsOnCall[len(fake.createArgsForCall)] + fake.createArgsForCall = append(fake.createArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 []controllerclient.CreateOption + }{arg1, arg2, arg3}) + stub := fake.CreateStub + fakeReturns := fake.createReturns + fake.recordInvocation("Create", []interface{}{arg1, arg2, arg3}) + fake.createMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) CreateCallCount() int { + fake.createMutex.RLock() + defer fake.createMutex.RUnlock() + return len(fake.createArgsForCall) +} + +func (fake *Client) CreateCalls(stub func(context.Context, client.Object, ...controllerclient.CreateOption) error) { + fake.createMutex.Lock() + defer fake.createMutex.Unlock() + fake.CreateStub = stub +} + +func (fake *Client) CreateArgsForCall(i int) (context.Context, client.Object, []controllerclient.CreateOption) { + fake.createMutex.RLock() + defer fake.createMutex.RUnlock() + argsForCall := fake.createArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) CreateReturns(result1 error) { + fake.createMutex.Lock() + defer fake.createMutex.Unlock() + fake.CreateStub = nil + fake.createReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) CreateReturnsOnCall(i int, result1 error) { + fake.createMutex.Lock() + defer fake.createMutex.Unlock() + fake.CreateStub = nil + if fake.createReturnsOnCall == nil { + fake.createReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.createReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) CreateOrUpdate(arg1 context.Context, arg2 client.Object, arg3 ...controllerclient.CreateOrUpdateOption) error { + fake.createOrUpdateMutex.Lock() + ret, specificReturn := fake.createOrUpdateReturnsOnCall[len(fake.createOrUpdateArgsForCall)] + fake.createOrUpdateArgsForCall = append(fake.createOrUpdateArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 []controllerclient.CreateOrUpdateOption + }{arg1, arg2, arg3}) + stub := fake.CreateOrUpdateStub + fakeReturns := fake.createOrUpdateReturns + fake.recordInvocation("CreateOrUpdate", []interface{}{arg1, arg2, arg3}) + fake.createOrUpdateMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) 
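	// (Editorial note, not part of the generated file.) Counterfeiter fakes resolve
	// return values in a fixed order: an installed *Stub wins, then a value
	// registered via *ReturnsOnCall for this specific invocation index, and finally
	// the default configured with *Returns.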
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) CreateOrUpdateCallCount() int { + fake.createOrUpdateMutex.RLock() + defer fake.createOrUpdateMutex.RUnlock() + return len(fake.createOrUpdateArgsForCall) +} + +func (fake *Client) CreateOrUpdateCalls(stub func(context.Context, client.Object, ...controllerclient.CreateOrUpdateOption) error) { + fake.createOrUpdateMutex.Lock() + defer fake.createOrUpdateMutex.Unlock() + fake.CreateOrUpdateStub = stub +} + +func (fake *Client) CreateOrUpdateArgsForCall(i int) (context.Context, client.Object, []controllerclient.CreateOrUpdateOption) { + fake.createOrUpdateMutex.RLock() + defer fake.createOrUpdateMutex.RUnlock() + argsForCall := fake.createOrUpdateArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) CreateOrUpdateReturns(result1 error) { + fake.createOrUpdateMutex.Lock() + defer fake.createOrUpdateMutex.Unlock() + fake.CreateOrUpdateStub = nil + fake.createOrUpdateReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) CreateOrUpdateReturnsOnCall(i int, result1 error) { + fake.createOrUpdateMutex.Lock() + defer fake.createOrUpdateMutex.Unlock() + fake.CreateOrUpdateStub = nil + if fake.createOrUpdateReturnsOnCall == nil { + fake.createOrUpdateReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.createOrUpdateReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) Delete(arg1 context.Context, arg2 client.Object, arg3 ...client.DeleteOption) error { + fake.deleteMutex.Lock() + ret, specificReturn := fake.deleteReturnsOnCall[len(fake.deleteArgsForCall)] + fake.deleteArgsForCall = append(fake.deleteArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 []client.DeleteOption + }{arg1, arg2, arg3}) + stub := fake.DeleteStub + fakeReturns := fake.deleteReturns + fake.recordInvocation("Delete", []interface{}{arg1, arg2, arg3}) + fake.deleteMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) DeleteCallCount() int { + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + return len(fake.deleteArgsForCall) +} + +func (fake *Client) DeleteCalls(stub func(context.Context, client.Object, ...client.DeleteOption) error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = stub +} + +func (fake *Client) DeleteArgsForCall(i int) (context.Context, client.Object, []client.DeleteOption) { + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + argsForCall := fake.deleteArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) DeleteReturns(result1 error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = nil + fake.deleteReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) DeleteReturnsOnCall(i int, result1 error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = nil + if fake.deleteReturnsOnCall == nil { + fake.deleteReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.deleteReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) Get(arg1 context.Context, arg2 types.NamespacedName, arg3 client.Object) error { + fake.getMutex.Lock() + ret, specificReturn := fake.getReturnsOnCall[len(fake.getArgsForCall)] + fake.getArgsForCall = append(fake.getArgsForCall, struct { + arg1 context.Context + arg2 types.NamespacedName + arg3 client.Object + }{arg1, arg2, arg3}) + stub := fake.GetStub + fakeReturns := fake.getReturns + fake.recordInvocation("Get", []interface{}{arg1, arg2, arg3}) + fake.getMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) GetCallCount() int { + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + return len(fake.getArgsForCall) +} + +func (fake *Client) GetCalls(stub func(context.Context, types.NamespacedName, client.Object) error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = stub +} + +func (fake *Client) GetArgsForCall(i int) (context.Context, types.NamespacedName, client.Object) { + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + argsForCall := fake.getArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) GetReturns(result1 error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = nil + fake.getReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) GetReturnsOnCall(i int, result1 error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = nil + if fake.getReturnsOnCall == nil { + fake.getReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.getReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) List(arg1 context.Context, arg2 client.ObjectList, arg3 ...client.ListOption) error { + fake.listMutex.Lock() + ret, specificReturn := fake.listReturnsOnCall[len(fake.listArgsForCall)] + fake.listArgsForCall = append(fake.listArgsForCall, struct { + arg1 context.Context + arg2 client.ObjectList + arg3 []client.ListOption + }{arg1, arg2, arg3}) + stub := fake.ListStub + fakeReturns := fake.listReturns + fake.recordInvocation("List", []interface{}{arg1, arg2, arg3}) + fake.listMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) ListCallCount() int { + fake.listMutex.RLock() + defer fake.listMutex.RUnlock() + return len(fake.listArgsForCall) +} + +func (fake *Client) ListCalls(stub func(context.Context, client.ObjectList, ...client.ListOption) error) { + fake.listMutex.Lock() + defer fake.listMutex.Unlock() + fake.ListStub = stub +} + +func (fake *Client) ListArgsForCall(i int) (context.Context, client.ObjectList, []client.ListOption) { + fake.listMutex.RLock() + defer fake.listMutex.RUnlock() + argsForCall := fake.listArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) ListReturns(result1 error) { + fake.listMutex.Lock() + defer fake.listMutex.Unlock() + fake.ListStub = nil + fake.listReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) ListReturnsOnCall(i int, result1 error) { + fake.listMutex.Lock() + defer fake.listMutex.Unlock() + fake.ListStub = nil + if fake.listReturnsOnCall == nil { + fake.listReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.listReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) Patch(arg1 context.Context, arg2 client.Object, arg3 client.Patch, arg4 ...controllerclient.PatchOption) error { + fake.patchMutex.Lock() + ret, specificReturn := fake.patchReturnsOnCall[len(fake.patchArgsForCall)] + fake.patchArgsForCall = append(fake.patchArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 client.Patch + arg4 []controllerclient.PatchOption + }{arg1, arg2, arg3, arg4}) + stub := fake.PatchStub + fakeReturns := fake.patchReturns + fake.recordInvocation("Patch", []interface{}{arg1, arg2, arg3, arg4}) + fake.patchMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3, arg4...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) PatchCallCount() int { + fake.patchMutex.RLock() + defer fake.patchMutex.RUnlock() + return len(fake.patchArgsForCall) +} + +func (fake *Client) PatchCalls(stub func(context.Context, client.Object, client.Patch, ...controllerclient.PatchOption) error) { + fake.patchMutex.Lock() + defer fake.patchMutex.Unlock() + fake.PatchStub = stub +} + +func (fake *Client) PatchArgsForCall(i int) (context.Context, client.Object, client.Patch, []controllerclient.PatchOption) { + fake.patchMutex.RLock() + defer fake.patchMutex.RUnlock() + argsForCall := fake.patchArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4 +} + +func (fake *Client) PatchReturns(result1 error) { + fake.patchMutex.Lock() + defer fake.patchMutex.Unlock() + fake.PatchStub = nil + fake.patchReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) PatchReturnsOnCall(i int, result1 error) { + fake.patchMutex.Lock() + defer fake.patchMutex.Unlock() + fake.PatchStub = nil + if fake.patchReturnsOnCall == nil { + fake.patchReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.patchReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) PatchStatus(arg1 context.Context, arg2 client.Object, arg3 client.Patch, arg4 ...controllerclient.PatchOption) error { + fake.patchStatusMutex.Lock() + ret, specificReturn := fake.patchStatusReturnsOnCall[len(fake.patchStatusArgsForCall)] + fake.patchStatusArgsForCall = append(fake.patchStatusArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 client.Patch + arg4 []controllerclient.PatchOption + }{arg1, arg2, arg3, arg4}) + stub := fake.PatchStatusStub + fakeReturns := fake.patchStatusReturns + fake.recordInvocation("PatchStatus", []interface{}{arg1, arg2, arg3, arg4}) + fake.patchStatusMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3, arg4...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) PatchStatusCallCount() int { + fake.patchStatusMutex.RLock() + defer fake.patchStatusMutex.RUnlock() + return len(fake.patchStatusArgsForCall) +} + +func (fake *Client) PatchStatusCalls(stub func(context.Context, client.Object, client.Patch, ...controllerclient.PatchOption) error) { + fake.patchStatusMutex.Lock() + defer fake.patchStatusMutex.Unlock() + fake.PatchStatusStub = stub +} + +func (fake *Client) PatchStatusArgsForCall(i int) (context.Context, client.Object, client.Patch, []controllerclient.PatchOption) { + fake.patchStatusMutex.RLock() + defer fake.patchStatusMutex.RUnlock() + argsForCall := fake.patchStatusArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4 +} + +func (fake *Client) PatchStatusReturns(result1 error) { + fake.patchStatusMutex.Lock() + defer fake.patchStatusMutex.Unlock() + fake.PatchStatusStub = nil + fake.patchStatusReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) PatchStatusReturnsOnCall(i int, result1 error) { + fake.patchStatusMutex.Lock() + defer fake.patchStatusMutex.Unlock() + fake.PatchStatusStub = nil + if fake.patchStatusReturnsOnCall == nil { + fake.patchStatusReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.patchStatusReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) Update(arg1 context.Context, arg2 client.Object, arg3 ...controllerclient.UpdateOption) error { + fake.updateMutex.Lock() + ret, specificReturn := fake.updateReturnsOnCall[len(fake.updateArgsForCall)] + fake.updateArgsForCall = append(fake.updateArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 []controllerclient.UpdateOption + }{arg1, arg2, arg3}) + stub := fake.UpdateStub + fakeReturns := fake.updateReturns + fake.recordInvocation("Update", []interface{}{arg1, arg2, arg3}) + fake.updateMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) UpdateCallCount() int { + fake.updateMutex.RLock() + defer fake.updateMutex.RUnlock() + return len(fake.updateArgsForCall) +} + +func (fake *Client) UpdateCalls(stub func(context.Context, client.Object, ...controllerclient.UpdateOption) error) { + fake.updateMutex.Lock() + defer fake.updateMutex.Unlock() + fake.UpdateStub = stub +} + +func (fake *Client) UpdateArgsForCall(i int) (context.Context, client.Object, []controllerclient.UpdateOption) { + fake.updateMutex.RLock() + defer fake.updateMutex.RUnlock() + argsForCall := fake.updateArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) UpdateReturns(result1 error) { + fake.updateMutex.Lock() + defer fake.updateMutex.Unlock() + fake.UpdateStub = nil + fake.updateReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) UpdateReturnsOnCall(i int, result1 error) { + fake.updateMutex.Lock() + defer fake.updateMutex.Unlock() + fake.UpdateStub = nil + if fake.updateReturnsOnCall == nil { + fake.updateReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.updateReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) UpdateStatus(arg1 context.Context, arg2 client.Object, arg3 ...client.UpdateOption) error { + fake.updateStatusMutex.Lock() + ret, specificReturn := fake.updateStatusReturnsOnCall[len(fake.updateStatusArgsForCall)] + fake.updateStatusArgsForCall = append(fake.updateStatusArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 []client.UpdateOption + }{arg1, arg2, arg3}) + stub := fake.UpdateStatusStub + fakeReturns := fake.updateStatusReturns + fake.recordInvocation("UpdateStatus", []interface{}{arg1, arg2, arg3}) + fake.updateStatusMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) UpdateStatusCallCount() int { + fake.updateStatusMutex.RLock() + defer fake.updateStatusMutex.RUnlock() + return len(fake.updateStatusArgsForCall) +} + +func (fake *Client) UpdateStatusCalls(stub func(context.Context, client.Object, ...client.UpdateOption) error) { + fake.updateStatusMutex.Lock() + defer fake.updateStatusMutex.Unlock() + fake.UpdateStatusStub = stub +} + +func (fake *Client) UpdateStatusArgsForCall(i int) (context.Context, client.Object, []client.UpdateOption) { + fake.updateStatusMutex.RLock() + defer fake.updateStatusMutex.RUnlock() + argsForCall := fake.updateStatusArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) UpdateStatusReturns(result1 error) { + fake.updateStatusMutex.Lock() + defer fake.updateStatusMutex.Unlock() + fake.UpdateStatusStub = nil + fake.updateStatusReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) UpdateStatusReturnsOnCall(i int, result1 error) { + fake.updateStatusMutex.Lock() + defer fake.updateStatusMutex.Unlock() + fake.UpdateStatusStub = nil + if fake.updateStatusReturnsOnCall == nil { + fake.updateStatusReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.updateStatusReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.createMutex.RLock() + defer fake.createMutex.RUnlock() + fake.createOrUpdateMutex.RLock() + defer fake.createOrUpdateMutex.RUnlock() + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + fake.listMutex.RLock() + defer fake.listMutex.RUnlock() + fake.patchMutex.RLock() + defer fake.patchMutex.RUnlock() + fake.patchStatusMutex.RLock() + defer fake.patchStatusMutex.RUnlock() + fake.updateMutex.RLock() + defer fake.updateMutex.RUnlock() + fake.updateStatusMutex.RLock() + defer fake.updateStatusMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *Client) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ controllerclient.Client = new(Client) diff --git a/pkg/initializer/peer/mocks/ibppeer.go b/pkg/initializer/peer/mocks/ibppeer.go new file mode 100644 index 00000000..eaf751a3 --- /dev/null +++ b/pkg/initializer/peer/mocks/ibppeer.go @@ -0,0 +1,312 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
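The fake Client above implements the operator's controllerclient.Client interface, and it is the same pattern the commented-out initializer tests earlier in this patch rely on: GetReturns forces error paths, GetStub fabricates Secret contents, and the call-count accessors let tests assert what the code under test did. A minimal, hypothetical sketch of that pattern as a standalone test, for illustration only (the test name, Secret name, and data are invented and are not part of the patch):

package mocks_test

import (
	"context"
	"testing"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer/mocks"
)

func TestFakeClientSketch(t *testing.T) {
	fake := &mocks.Client{}

	// Fabricate the Secret that code under test reads through Get.
	// (fake.GetReturns(err) would instead force every Get to fail.)
	fake.GetStub = func(ctx context.Context, nn types.NamespacedName, obj client.Object) error {
		if s, ok := obj.(*corev1.Secret); ok {
			s.Data = map[string][]byte{"cert.pem": []byte("not-a-real-cert")}
		}
		return nil
	}

	secret := &corev1.Secret{}
	if err := fake.Get(context.TODO(), types.NamespacedName{Name: "org1peer1-admincerts"}, secret); err != nil {
		t.Fatal(err)
	}

	// The fake records every invocation, so call counts and arguments can be asserted.
	if fake.GetCallCount() != 1 {
		t.Fatalf("expected 1 Get call, got %d", fake.GetCallCount())
	}
	if len(secret.Data) == 0 {
		t.Fatal("expected fabricated secret data")
	}
}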
+package mocks + +import ( + "sync" + + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + initializer "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer" +) + +type IBPPeer struct { + DeliveryClientCryptoStub func() map[string][]byte + deliveryClientCryptoMutex sync.RWMutex + deliveryClientCryptoArgsForCall []struct { + } + deliveryClientCryptoReturns struct { + result1 map[string][]byte + } + deliveryClientCryptoReturnsOnCall map[int]struct { + result1 map[string][]byte + } + GenerateCryptoStub func() (*config.CryptoResponse, error) + generateCryptoMutex sync.RWMutex + generateCryptoArgsForCall []struct { + } + generateCryptoReturns struct { + result1 *config.CryptoResponse + result2 error + } + generateCryptoReturnsOnCall map[int]struct { + result1 *config.CryptoResponse + result2 error + } + GetConfigStub func() initializer.CoreConfig + getConfigMutex sync.RWMutex + getConfigArgsForCall []struct { + } + getConfigReturns struct { + result1 initializer.CoreConfig + } + getConfigReturnsOnCall map[int]struct { + result1 initializer.CoreConfig + } + OverrideConfigStub func(initializer.CoreConfig) error + overrideConfigMutex sync.RWMutex + overrideConfigArgsForCall []struct { + arg1 initializer.CoreConfig + } + overrideConfigReturns struct { + result1 error + } + overrideConfigReturnsOnCall map[int]struct { + result1 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *IBPPeer) DeliveryClientCrypto() map[string][]byte { + fake.deliveryClientCryptoMutex.Lock() + ret, specificReturn := fake.deliveryClientCryptoReturnsOnCall[len(fake.deliveryClientCryptoArgsForCall)] + fake.deliveryClientCryptoArgsForCall = append(fake.deliveryClientCryptoArgsForCall, struct { + }{}) + stub := fake.DeliveryClientCryptoStub + fakeReturns := fake.deliveryClientCryptoReturns + fake.recordInvocation("DeliveryClientCrypto", []interface{}{}) + fake.deliveryClientCryptoMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *IBPPeer) DeliveryClientCryptoCallCount() int { + fake.deliveryClientCryptoMutex.RLock() + defer fake.deliveryClientCryptoMutex.RUnlock() + return len(fake.deliveryClientCryptoArgsForCall) +} + +func (fake *IBPPeer) DeliveryClientCryptoCalls(stub func() map[string][]byte) { + fake.deliveryClientCryptoMutex.Lock() + defer fake.deliveryClientCryptoMutex.Unlock() + fake.DeliveryClientCryptoStub = stub +} + +func (fake *IBPPeer) DeliveryClientCryptoReturns(result1 map[string][]byte) { + fake.deliveryClientCryptoMutex.Lock() + defer fake.deliveryClientCryptoMutex.Unlock() + fake.DeliveryClientCryptoStub = nil + fake.deliveryClientCryptoReturns = struct { + result1 map[string][]byte + }{result1} +} + +func (fake *IBPPeer) DeliveryClientCryptoReturnsOnCall(i int, result1 map[string][]byte) { + fake.deliveryClientCryptoMutex.Lock() + defer fake.deliveryClientCryptoMutex.Unlock() + fake.DeliveryClientCryptoStub = nil + if fake.deliveryClientCryptoReturnsOnCall == nil { + fake.deliveryClientCryptoReturnsOnCall = make(map[int]struct { + result1 map[string][]byte + }) + } + fake.deliveryClientCryptoReturnsOnCall[i] = struct { + result1 map[string][]byte + }{result1} +} + +func (fake *IBPPeer) GenerateCrypto() (*config.CryptoResponse, error) { + fake.generateCryptoMutex.Lock() + ret, specificReturn := fake.generateCryptoReturnsOnCall[len(fake.generateCryptoArgsForCall)] + fake.generateCryptoArgsForCall = 
append(fake.generateCryptoArgsForCall, struct { + }{}) + stub := fake.GenerateCryptoStub + fakeReturns := fake.generateCryptoReturns + fake.recordInvocation("GenerateCrypto", []interface{}{}) + fake.generateCryptoMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *IBPPeer) GenerateCryptoCallCount() int { + fake.generateCryptoMutex.RLock() + defer fake.generateCryptoMutex.RUnlock() + return len(fake.generateCryptoArgsForCall) +} + +func (fake *IBPPeer) GenerateCryptoCalls(stub func() (*config.CryptoResponse, error)) { + fake.generateCryptoMutex.Lock() + defer fake.generateCryptoMutex.Unlock() + fake.GenerateCryptoStub = stub +} + +func (fake *IBPPeer) GenerateCryptoReturns(result1 *config.CryptoResponse, result2 error) { + fake.generateCryptoMutex.Lock() + defer fake.generateCryptoMutex.Unlock() + fake.GenerateCryptoStub = nil + fake.generateCryptoReturns = struct { + result1 *config.CryptoResponse + result2 error + }{result1, result2} +} + +func (fake *IBPPeer) GenerateCryptoReturnsOnCall(i int, result1 *config.CryptoResponse, result2 error) { + fake.generateCryptoMutex.Lock() + defer fake.generateCryptoMutex.Unlock() + fake.GenerateCryptoStub = nil + if fake.generateCryptoReturnsOnCall == nil { + fake.generateCryptoReturnsOnCall = make(map[int]struct { + result1 *config.CryptoResponse + result2 error + }) + } + fake.generateCryptoReturnsOnCall[i] = struct { + result1 *config.CryptoResponse + result2 error + }{result1, result2} +} + +func (fake *IBPPeer) GetConfig() initializer.CoreConfig { + fake.getConfigMutex.Lock() + ret, specificReturn := fake.getConfigReturnsOnCall[len(fake.getConfigArgsForCall)] + fake.getConfigArgsForCall = append(fake.getConfigArgsForCall, struct { + }{}) + stub := fake.GetConfigStub + fakeReturns := fake.getConfigReturns + fake.recordInvocation("GetConfig", []interface{}{}) + fake.getConfigMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *IBPPeer) GetConfigCallCount() int { + fake.getConfigMutex.RLock() + defer fake.getConfigMutex.RUnlock() + return len(fake.getConfigArgsForCall) +} + +func (fake *IBPPeer) GetConfigCalls(stub func() initializer.CoreConfig) { + fake.getConfigMutex.Lock() + defer fake.getConfigMutex.Unlock() + fake.GetConfigStub = stub +} + +func (fake *IBPPeer) GetConfigReturns(result1 initializer.CoreConfig) { + fake.getConfigMutex.Lock() + defer fake.getConfigMutex.Unlock() + fake.GetConfigStub = nil + fake.getConfigReturns = struct { + result1 initializer.CoreConfig + }{result1} +} + +func (fake *IBPPeer) GetConfigReturnsOnCall(i int, result1 initializer.CoreConfig) { + fake.getConfigMutex.Lock() + defer fake.getConfigMutex.Unlock() + fake.GetConfigStub = nil + if fake.getConfigReturnsOnCall == nil { + fake.getConfigReturnsOnCall = make(map[int]struct { + result1 initializer.CoreConfig + }) + } + fake.getConfigReturnsOnCall[i] = struct { + result1 initializer.CoreConfig + }{result1} +} + +func (fake *IBPPeer) OverrideConfig(arg1 initializer.CoreConfig) error { + fake.overrideConfigMutex.Lock() + ret, specificReturn := fake.overrideConfigReturnsOnCall[len(fake.overrideConfigArgsForCall)] + fake.overrideConfigArgsForCall = append(fake.overrideConfigArgsForCall, struct { + arg1 initializer.CoreConfig + }{arg1}) + stub := fake.OverrideConfigStub + fakeReturns := fake.overrideConfigReturns + 
fake.recordInvocation("OverrideConfig", []interface{}{arg1}) + fake.overrideConfigMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *IBPPeer) OverrideConfigCallCount() int { + fake.overrideConfigMutex.RLock() + defer fake.overrideConfigMutex.RUnlock() + return len(fake.overrideConfigArgsForCall) +} + +func (fake *IBPPeer) OverrideConfigCalls(stub func(initializer.CoreConfig) error) { + fake.overrideConfigMutex.Lock() + defer fake.overrideConfigMutex.Unlock() + fake.OverrideConfigStub = stub +} + +func (fake *IBPPeer) OverrideConfigArgsForCall(i int) initializer.CoreConfig { + fake.overrideConfigMutex.RLock() + defer fake.overrideConfigMutex.RUnlock() + argsForCall := fake.overrideConfigArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *IBPPeer) OverrideConfigReturns(result1 error) { + fake.overrideConfigMutex.Lock() + defer fake.overrideConfigMutex.Unlock() + fake.OverrideConfigStub = nil + fake.overrideConfigReturns = struct { + result1 error + }{result1} +} + +func (fake *IBPPeer) OverrideConfigReturnsOnCall(i int, result1 error) { + fake.overrideConfigMutex.Lock() + defer fake.overrideConfigMutex.Unlock() + fake.OverrideConfigStub = nil + if fake.overrideConfigReturnsOnCall == nil { + fake.overrideConfigReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.overrideConfigReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *IBPPeer) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.deliveryClientCryptoMutex.RLock() + defer fake.deliveryClientCryptoMutex.RUnlock() + fake.generateCryptoMutex.RLock() + defer fake.generateCryptoMutex.RUnlock() + fake.getConfigMutex.RLock() + defer fake.getConfigMutex.RUnlock() + fake.overrideConfigMutex.RLock() + defer fake.overrideConfigMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *IBPPeer) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ initializer.IBPPeer = new(IBPPeer) diff --git a/pkg/initializer/peer/peer.go b/pkg/initializer/peer/peer.go new file mode 100644 index 00000000..6fcd9323 --- /dev/null +++ b/pkg/initializer/peer/peer.go @@ -0,0 +1,84 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package initializer + +import ( + "fmt" + + commonapi "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + commonconfig "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer/config/v1" + "github.com/pkg/errors" +) + +type CoreConfig interface { + MergeWith(interface{}, bool) error + GetAddressOverrides() []v1.AddressOverride + ToBytes() ([]byte, error) + UsingPKCS11() bool + SetPKCS11Defaults(bool) + GetBCCSPSection() *commonapi.BCCSP + SetBCCSPLibrary(string) +} + +type Peer struct { + Config CoreConfig + Cryptos *commonconfig.Cryptos + UsingHSMProxy bool +} + +func (p *Peer) OverrideConfig(newConfig CoreConfig) (err error) { + log.Info("Overriding peer config values from spec") + err = p.Config.MergeWith(newConfig, p.UsingHSMProxy) + if err != nil { + return errors.Wrapf(err, "failed to merge override configuration") + } + + return nil +} + +func (p *Peer) GenerateCrypto() (*commonconfig.CryptoResponse, error) { + log.Info("Generating peer's crypto material") + if p.Cryptos != nil { + response, err := p.Cryptos.GenerateCryptoResponse() + if err != nil { + return nil, err + } + return response, nil + } + + return &config.CryptoResponse{}, nil +} + +func (p *Peer) GetConfig() CoreConfig { + return p.Config +} + +func (p *Peer) DeliveryClientCrypto() map[string][]byte { + data := map[string][]byte{} + + if p.Config != nil { + for i, addr := range p.Config.GetAddressOverrides() { + data[fmt.Sprintf("cert%d.pem", i)] = addr.GetCertBytes() + } + } + + return data +} diff --git a/pkg/initializer/peer/peer_suite_test.go b/pkg/initializer/peer/peer_suite_test.go new file mode 100644 index 00000000..25d28c17 --- /dev/null +++ b/pkg/initializer/peer/peer_suite_test.go @@ -0,0 +1,51 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package initializer_test + +import ( + "net/http" + "net/http/httptest" + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +func TestPeer(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Peer Suite") +} + +var ( + server *httptest.Server +) + +var _ = BeforeSuite(func() { + // Start a local HTTP server + server = httptest.NewTLSServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + // Test request parameters + Expect(req.URL.String()).To(Equal("/cainfo")) + return + })) +}) + +var _ = AfterSuite(func() { + // Close the server when test finishes + server.Close() +}) diff --git a/pkg/initializer/peer/peer_test.go b/pkg/initializer/peer/peer_test.go new file mode 100644 index 00000000..8c2bb826 --- /dev/null +++ b/pkg/initializer/peer/peer_test.go @@ -0,0 +1,91 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package initializer_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/pkg/errors" + + commonapi "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/peer/v1" + commonconfig "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + configmocks "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config/mocks" + initializer "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer" + config "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer/config/v1" +) + +var _ = Describe("peer", func() { + var ( + peer *initializer.Peer + + mockCrypto *configmocks.Crypto + ) + + BeforeEach(func() { + mockCrypto = &configmocks.Crypto{} + + peer = &initializer.Peer{ + Config: &config.Core{}, + Cryptos: &commonconfig.Cryptos{ + Enrollment: mockCrypto, + }, + } + }) + + Context("config override", func() { + When("using hsm proxy", func() { + BeforeEach(func() { + peer.UsingHSMProxy = true + }) + + It("overrides peer's config", func() { + core := &config.Core{ + Core: v1.Core{ + Peer: v1.Peer{ + BCCSP: &commonapi.BCCSP{ + ProviderName: "PKCS11", + PKCS11: &commonapi.PKCS11Opts{}, + }, + }, + }, + } + + err := peer.OverrideConfig(core) + Expect(err).NotTo(HaveOccurred()) + + Expect(core.Peer.BCCSP.PKCS11.Library).To(Equal("/usr/local/lib/libpkcs11-proxy.so")) + }) + }) + }) + + Context("generate crypto", func() { + It("returns error if unable to get crypto", func() { + mockCrypto.GetCryptoReturns(nil, errors.New("get crypto error")) + _, err := peer.GenerateCrypto() + Expect(err).To(HaveOccurred()) + }) + + It("gets crypto", func() { + resp, err := peer.GenerateCrypto() + Expect(err).NotTo(HaveOccurred()) + Expect(resp).NotTo(BeNil()) + }) + }) +}) diff --git a/pkg/initializer/validator/validator.go b/pkg/initializer/validator/validator.go new file mode 100644 index 00000000..feb74efe --- /dev/null +++ b/pkg/initializer/validator/validator.go @@ -0,0 +1,237 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * 
Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package validator + +import ( + "context" + "crypto/x509" + "encoding/pem" + "strings" + + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +type Validator struct { + Client k8sclient.Client + HSMEnabled bool +} + +func (v *Validator) CheckAdminCerts(instance v1.Object, prefix string) error { + //No-op + return nil +} + +func (v *Validator) CheckEcertCrypto(instance v1.Object, name string) error { + prefix := "ecert-" + name + + // CA certs verification + err := v.CheckCACerts(instance, prefix) + if err != nil { + return err + } + + if v.HSMEnabled { + err = v.CheckCert(instance, prefix) + if err != nil { + return err + } + } else { + err = v.CheckCertAndKey(instance, prefix) + if err != nil { + return err + } + } + + return nil +} + +func (v *Validator) CheckTLSCrypto(instance v1.Object, name string) error { + prefix := "tls-" + name + + // CA certs verification + err := v.CheckCACerts(instance, prefix) + if err != nil { + return err + } + + err = v.CheckCertAndKey(instance, prefix) + if err != nil { + return err + } + + return nil +} + +func (v *Validator) CheckClientAuthCrypto(instance v1.Object, name string) error { + prefix := "clientauth-" + name + + // CA cert verification + err := v.CheckCACerts(instance, prefix) + if err != nil { + return err + } + + err = v.CheckCertAndKey(instance, prefix) + if err != nil { + return err + } + + return nil +} + +func (v *Validator) CheckCACerts(instance v1.Object, prefix string) error { + namespacedName := types.NamespacedName{ + Name: prefix + "-cacerts", + Namespace: instance.GetNamespace(), + } + + caCerts := &corev1.Secret{} + err := v.Client.Get(context.TODO(), namespacedName, caCerts) + if err != nil { + return err + } + + if caCerts.Data == nil || len(caCerts.Data) == 0 { + return errors.New("no ca certificates found in cacerts secret") + } + + err = ValidateCerts(caCerts.Data) + if err != nil { + return errors.Wrap(err, "not a proper ca cert") + } + + return nil +} + +func (v *Validator) CheckCertAndKey(instance v1.Object, prefix string) error { + var err error + + // Sign cert verification + err = v.CheckCert(instance, prefix) + if err != nil { + return err + } + + // Key verification + err = v.CheckKey(instance, prefix) + if err != nil { + return err + } + + return nil +} + +func (v *Validator) CheckCert(instance v1.Object, prefix string) error { + namespacedName := types.NamespacedName{ + Namespace: instance.GetNamespace(), + } + + signCert := &corev1.Secret{} + namespacedName.Name = prefix + "-signcert" + err := v.Client.Get(context.TODO(), namespacedName, signCert) + if err != nil { + return err + } + + err = ValidateCert(signCert.Data["cert.pem"]) + if err != nil { + return errors.Wrap(err, "not a proper sign cert") + } + + return nil +} + +func (v *Validator) CheckKey(instance v1.Object, prefix string) error { 
+ namespacedName := types.NamespacedName{ + Namespace: instance.GetNamespace(), + } + + key := &corev1.Secret{} + namespacedName.Name = prefix + "-keystore" + err := v.Client.Get(context.TODO(), namespacedName, key) + if err != nil { + return err + } + + err = ValidateKey(key.Data["key.pem"]) + if err != nil { + return errors.Wrap(err, "not a proper key") + } + + return nil +} + +func (v *Validator) SetHSMEnabled(enabled bool) { + v.HSMEnabled = enabled +} + +func CheckError(err error) bool { + if err == nil { + return false + } + + if strings.Contains(err.Error(), "not found") || strings.Contains(err.Error(), "not valid") { + return true + } + + return false +} + +func ValidateCerts(certs map[string][]byte) error { + for _, cert := range certs { + err := ValidateCert(cert) + if err != nil { + return err + } + } + + return nil +} + +func ValidateCert(cert []byte) error { + block, _ := pem.Decode(cert) + if block == nil { + return errors.New("failed to get certificate block") + } + + _, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return errors.Wrap(err, "not valid") + } + + return nil +} + +func ValidateKey(key []byte) error { + block, _ := pem.Decode(key) + if block == nil { + return errors.New("failed to get key block") + } + + _, err := x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + return errors.Wrap(err, "not valid") + } + + return nil +} diff --git a/pkg/initializer/validator/validator_suite_test.go b/pkg/initializer/validator/validator_suite_test.go new file mode 100644 index 00000000..2b16c8a4 --- /dev/null +++ b/pkg/initializer/validator/validator_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package validator_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestValidator(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Validator Suite") +} diff --git a/pkg/initializer/validator/validator_test.go b/pkg/initializer/validator/validator_test.go new file mode 100644 index 00000000..28e9d1f4 --- /dev/null +++ b/pkg/initializer/validator/validator_test.go @@ -0,0 +1,148 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
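ValidateCert and ValidateKey above accept a PEM-encoded X.509 certificate and a PEM-encoded PKCS#8 private key respectively, which is also the shape of the base64 testcert/testkey fixtures used in the validator tests that follow. A self-contained sketch, illustrative only and not part of the patch, that generates a throwaway ECDSA certificate and key in memory and runs them through the helpers (error handling elided for brevity):

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/base64"
	"encoding/pem"
	"fmt"
	"math/big"
	"time"

	"github.com/IBM-Blockchain/fabric-operator/pkg/initializer/validator"
)

func main() {
	// Throwaway self-signed certificate plus PKCS#8 key -- the formats the helpers expect.
	key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "throwaway"},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(time.Hour),
	}
	certDER, _ := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certDER})

	keyDER, _ := x509.MarshalPKCS8PrivateKey(key)
	keyPEM := pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: keyDER})

	fmt.Println(validator.ValidateCert(certPEM)) // <nil> for a well-formed certificate
	fmt.Println(validator.ValidateKey(keyPEM))   // <nil> for a well-formed PKCS#8 key

	// Base64-encoding the PEM is one way fixtures like the testcert/testkey constants
	// in the test file below could be produced.
	_ = base64.StdEncoding.EncodeToString(certPEM)
}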
+ */ + +package validator_test + +import ( + "context" + "encoding/base64" + "strings" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + controllermocks "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + initvalidator "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/validator" +) + +const ( + testcert = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNpVENDQWkrZ0F3SUJBZ0lVRkd3N0RjK0QvZUoyY08wOHd6d2tialIzK1M4d0NnWUlLb1pJemowRUF3SXcKYURFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJrd0Z3WURWUVFERXhCbVlXSnlhV010ClkyRXRjMlZ5ZG1WeU1CNFhEVEU1TVRBd09URTBNakF3TUZvWERUSXdNVEF3T0RFME1qQXdNRm93YnpFTE1Ba0cKQTFVRUJoTUNWVk14RnpBVkJnTlZCQWdURGs1dmNuUm9JRU5oY205c2FXNWhNUlF3RWdZRFZRUUtFd3RJZVhCbApjbXhsWkdkbGNqRVBNQTBHQTFVRUN4TUdSbUZpY21sak1TQXdIZ1lEVlFRREV4ZFRZV0ZrY3kxTllXTkNiMjlyCkxWQnlieTVzYjJOaGJEQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJBK0JBRzhZakJvTllabGgKRjFrVHNUbHd6VERDQTJocDhZTXI5Ky8vbEd0NURoSGZVT1c3bkhuSW1USHlPRjJQVjFPcVRuUWhUbWpLYTdaQwpqeU9BUWxLamdhOHdnYXd3RGdZRFZSMFBBUUgvQkFRREFnT29NQjBHQTFVZEpRUVdNQlFHQ0NzR0FRVUZCd01CCkJnZ3JCZ0VGQlFjREFqQU1CZ05WSFJNQkFmOEVBakFBTUIwR0ExVWREZ1FXQkJTbHJjL0lNQkxvMzR0UktvWnEKNTQreDIyYWEyREFmQmdOVkhTTUVHREFXZ0JSWmpxT3RQZWJzSFI2UjBNQUhrNnd4ei85UFZqQXRCZ05WSFJFRQpKakFrZ2hkVFlXRmtjeTFOWVdOQ2IyOXJMVkJ5Ynk1c2IyTmhiSUlKYkc5allXeG9iM04wTUFvR0NDcUdTTTQ5CkJBTUNBMGdBTUVVQ0lRRGR0Y1QwUE9FQXJZKzgwdEhmWUwvcXBiWWoxMGU2eWlPWlpUQ29wY25mUVFJZ1FNQUQKaFc3T0NSUERNd3lqKzNhb015d2hFenFHYy9jRDJSU2V5ekRiRjFFPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==" + testkey = "LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JR0hBZ0VBTUJNR0J5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEJHMHdhd0lCQVFRZ3hRUXdSVFFpVUcwREo1UHoKQTJSclhIUEtCelkxMkxRa0MvbVlveWo1bEhDaFJBTkNBQVN5bE1YLzFqdDlmUGt1RTZ0anpvSTlQbGt4LzZuVQpCMHIvMU56TTdrYnBjUk8zQ3RIeXQ2TXlQR21FOUZUN29pYXphU3J1TW9JTDM0VGdBdUpIOU9ZWQotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg==" +) + +var _ = Describe("validator", func() { + var ( + validator *initvalidator.Validator + instance *current.IBPPeer + mockClient *controllermocks.Client + + testCertBytes []byte + testKeyBytes []byte + ) + + BeforeEach(func() { + var err error + + instance = ¤t.IBPPeer{} + mockClient = &controllermocks.Client{} + + testCertBytes, err = base64.StdEncoding.DecodeString(testcert) + Expect(err).NotTo(HaveOccurred()) + testKeyBytes, err = base64.StdEncoding.DecodeString(testkey) + Expect(err).NotTo(HaveOccurred()) + + mockClient.GetStub = func(ctx context.Context, t types.NamespacedName, obj client.Object) error { + switch obj.(type) { + case *corev1.Secret: + if strings.Contains(t.Name, "keystore") { + s := obj.(*corev1.Secret) + s.Data = map[string][]byte{ + "key.pem": testKeyBytes, + } + } else { + s := obj.(*corev1.Secret) + s.Data = map[string][]byte{ + "cert.pem": testCertBytes, + } + } + } + return nil + } + + validator = &initvalidator.Validator{ + Client: mockClient, + } + }) + + Context("check ecert certs", func() { + It("returns an error if secret contains no certs", func() { + mockClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj client.Object) error { + switch obj.(type) { + case *corev1.Secret: + s := obj.(*corev1.Secret) + s.Data = nil + } + return nil + } + + err := validator.CheckEcertCrypto(instance, instance.GetName()) + Expect(err).To(HaveOccurred()) + }) + + It("returns no 
error if a valid cert found in secret", func() { + err := validator.CheckEcertCrypto(instance, instance.GetName()) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("check tls certs", func() { + It("returns an error if secret contains no certs", func() { + mockClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj client.Object) error { + switch obj.(type) { + case *corev1.Secret: + s := obj.(*corev1.Secret) + s.Data = nil + } + return nil + } + + err := validator.CheckTLSCrypto(instance, instance.GetName()) + Expect(err).To(HaveOccurred()) + }) + + It("returns no error if a valid cert found in secret", func() { + err := validator.CheckTLSCrypto(instance, instance.GetName()) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("check client auth certs", func() { + It("returns an error if secret contains no certs", func() { + mockClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj client.Object) error { + switch obj.(type) { + case *corev1.Secret: + s := obj.(*corev1.Secret) + s.Data = nil + } + return nil + } + + err := validator.CheckClientAuthCrypto(instance, instance.GetName()) + Expect(err).To(HaveOccurred()) + }) + + It("returns no error if a valid cert found in secret", func() { + err := validator.CheckClientAuthCrypto(instance, instance.GetName()) + Expect(err).NotTo(HaveOccurred()) + }) + }) +}) diff --git a/pkg/k8s/clientset/client.go b/pkg/k8s/clientset/client.go new file mode 100644 index 00000000..b7ab8631 --- /dev/null +++ b/pkg/k8s/clientset/client.go @@ -0,0 +1,68 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package clientset + +import ( + "context" + "fmt" + "strings" + + extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/rest" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var log = logf.Log.WithName("clientset") + +type Client struct { + clientset.Clientset +} + +func New(config *rest.Config) (*Client, error) { + clientSet, err := clientset.NewForConfig(config) + if err != nil { + return nil, err + } + return &Client{*clientSet}, nil +} + +func (c *Client) CreateCRD(crd *extv1.CustomResourceDefinition) (*extv1.CustomResourceDefinition, error) { + log.Info(fmt.Sprintf("Creating CRD '%s'", crd.Name)) + result, err := c.ApiextensionsV1().CustomResourceDefinitions().Create(context.TODO(), crd, v1.CreateOptions{}) + if err != nil { + if strings.Contains(err.Error(), "already exists") { + existingcrd, err := c.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), crd.Name, v1.GetOptions{}) + if err != nil { + return nil, err + } + + log.Info(fmt.Sprintf("Updating CRD '%s'", crd.Name)) + existingcrd.Spec = crd.Spec + result, err = c.ApiextensionsV1().CustomResourceDefinitions().Update(context.TODO(), existingcrd, v1.UpdateOptions{}) + if err != nil { + return nil, err + } + } else { + log.Error(err, "Error creating CRD", "CRD", crd.Name) + } + } + return result, nil +} diff --git a/pkg/k8s/controllerclient/client.go b/pkg/k8s/controllerclient/client.go new file mode 100644 index 00000000..1045cf48 --- /dev/null +++ b/pkg/k8s/controllerclient/client.go @@ -0,0 +1,296 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package controllerclient + +import ( + "context" + "time" + + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + "github.com/pkg/errors" + + k8serrors "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + k8sclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +//go:generate counterfeiter -o ../../controller/mocks/client.go -fake-name Client . 
Client + +type Client interface { + Get(ctx context.Context, key k8sclient.ObjectKey, obj k8sclient.Object) error + List(ctx context.Context, list k8sclient.ObjectList, opts ...k8sclient.ListOption) error + Create(ctx context.Context, obj k8sclient.Object, opts ...CreateOption) error + CreateOrUpdate(ctx context.Context, obj k8sclient.Object, opts ...CreateOrUpdateOption) error + Delete(ctx context.Context, obj k8sclient.Object, opts ...k8sclient.DeleteOption) error + Patch(ctx context.Context, obj k8sclient.Object, patch k8sclient.Patch, opts ...PatchOption) error + PatchStatus(ctx context.Context, obj k8sclient.Object, patch k8sclient.Patch, opts ...PatchOption) error + Update(ctx context.Context, obj k8sclient.Object, opts ...UpdateOption) error + UpdateStatus(ctx context.Context, obj k8sclient.Object, opts ...k8sclient.UpdateOption) error +} + +// GlobalConfig applies the global configuration defined in operator's config to appropriate +// kubernetes resources +type GlobalConfig interface { + Apply(runtime.Object) +} + +type ClientImpl struct { + k8sClient k8sclient.Client + GlobalConfig GlobalConfig +} + +func New(c k8sclient.Client, gc GlobalConfig) *ClientImpl { + return &ClientImpl{ + k8sClient: c, + GlobalConfig: gc, + } +} + +func (c *ClientImpl) Get(ctx context.Context, key k8sclient.ObjectKey, obj k8sclient.Object) error { + err := c.k8sClient.Get(ctx, key, obj) + if err != nil { + return err + } + return nil +} + +func (c *ClientImpl) List(ctx context.Context, list k8sclient.ObjectList, opts ...k8sclient.ListOption) error { + err := c.k8sClient.List(ctx, list, opts...) + if err != nil { + return err + } + return nil +} + +func (c *ClientImpl) Create(ctx context.Context, obj k8sclient.Object, opts ...CreateOption) error { + var createOpts []k8sclient.CreateOption + + c.GlobalConfig.Apply(obj) + + if opts != nil && len(opts) > 0 { + if err := setControllerReference(opts[0].Owner, obj, opts[0].Scheme); err != nil { + return err + } + createOpts = opts[0].Opts + } + + err := c.k8sClient.Create(ctx, obj, createOpts...) + if err != nil { + return util.IgnoreAlreadyExistError(err) + } + return nil +} + +func (c *ClientImpl) Patch(ctx context.Context, obj k8sclient.Object, patch k8sclient.Patch, opts ...PatchOption) error { + var patchOpts []k8sclient.PatchOption + + c.GlobalConfig.Apply(obj) + + if opts != nil && len(opts) > 0 { + if opts[0].Resilient != nil { + return c.ResilientPatch(ctx, obj, opts[0].Resilient, opts[0].Opts...) + } + + patchOpts = opts[0].Opts + } + + err := c.k8sClient.Patch(ctx, obj, patch, patchOpts...) + if err != nil { + return err + } + return nil +} + +func (c *ClientImpl) ResilientPatch(ctx context.Context, obj k8sclient.Object, resilient *ResilientPatch, opts ...k8sclient.PatchOption) error { + retry := resilient.Retry + into := resilient.Into + strategy := resilient.Strategy + + c.GlobalConfig.Apply(obj) + + for i := 0; i < retry; i++ { + err := c.resilientPatch(ctx, obj, strategy, into, opts...) 
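		// (Editorial note, not part of the patch.) Conflict errors from the API server
		// are retried after a 2-second sleep, up to Retry attempts, while any other
		// error returns immediately. As written, a successful patch also falls through
		// to the next iteration (there is no early return on success), and the
		// `i == retry` guard is unreachable because the loop stops at retry-1.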
+ if err != nil { + if i == retry { + return err + } + if k8serrors.IsConflict(err) { + time.Sleep(2 * time.Second) + continue + } + return err + } + } + + return nil +} + +func (c *ClientImpl) resilientPatch(ctx context.Context, obj k8sclient.Object, strategy func(k8sclient.Object) k8sclient.Patch, into k8sclient.Object, opts ...k8sclient.PatchOption) error { + key := types.NamespacedName{ + Name: obj.GetName(), + Namespace: obj.GetNamespace(), + } + + err := c.Get(ctx, key, into) + if err != nil { + return err + } + + err = c.k8sClient.Patch(ctx, obj, strategy(into), opts...) + if err != nil { + return err + } + return nil +} + +// If utilizing resilient option, nil can be passed for patch parameter +func (c *ClientImpl) PatchStatus(ctx context.Context, obj k8sclient.Object, patch k8sclient.Patch, opts ...PatchOption) error { + var patchOpts []k8sclient.PatchOption + + if opts != nil && len(opts) > 0 { + if opts[0].Resilient != nil { + return c.ResilientPatchStatus(ctx, obj, opts[0].Resilient, opts[0].Opts...) + } + + patchOpts = opts[0].Opts + } + + err := c.k8sClient.Status().Patch(ctx, obj, patch, patchOpts...) + if err != nil { + return err + } + return nil +} + +func (c *ClientImpl) ResilientPatchStatus(ctx context.Context, obj k8sclient.Object, resilient *ResilientPatch, opts ...k8sclient.PatchOption) error { + retry := resilient.Retry + into := resilient.Into + strategy := resilient.Strategy + + for i := 0; i < retry; i++ { + err := c.resilientPatchStatus(ctx, obj, strategy, into, opts...) + if err != nil { + if i == retry { + return err + } + if k8serrors.IsConflict(err) { + time.Sleep(2 * time.Second) + continue + } + return err + } + } + + return nil +} + +func (c *ClientImpl) resilientPatchStatus(ctx context.Context, obj k8sclient.Object, strategy func(k8sclient.Object) k8sclient.Patch, into k8sclient.Object, opts ...k8sclient.PatchOption) error { + key := types.NamespacedName{ + Name: obj.GetName(), + Namespace: obj.GetNamespace(), + } + + err := c.Get(ctx, key, into) + if err != nil { + return err + } + + err = c.k8sClient.Status().Patch(ctx, obj, strategy(into), opts...) + if err != nil { + return err + } + return nil +} + +// NOTE: Currently, Resilient Update is not supported as it requires more specific +// implementation based on scenario. When possible, should utilize resilient Patch. +func (c *ClientImpl) Update(ctx context.Context, obj k8sclient.Object, opts ...UpdateOption) error { + var updateOpts []k8sclient.UpdateOption + + c.GlobalConfig.Apply(obj) + + if opts != nil && len(opts) > 0 { + if err := setControllerReference(opts[0].Owner, obj, opts[0].Scheme); err != nil { + return err + } + updateOpts = opts[0].Opts + } + + err := c.k8sClient.Update(ctx, obj, updateOpts...) + if err != nil { + return err + } + return nil +} + +// NOTE: Currently, Resilient UpdateStatus is not supported as it requires more specific +// implementation based on scenario. When possible, should utilize resilient PatchStatus. +func (c *ClientImpl) UpdateStatus(ctx context.Context, obj k8sclient.Object, opts ...k8sclient.UpdateOption) error { + err := c.k8sClient.Status().Update(ctx, obj, opts...) + if err != nil { + return err + } + return nil +} + +func (c *ClientImpl) Delete(ctx context.Context, obj k8sclient.Object, opts ...k8sclient.DeleteOption) error { + err := c.k8sClient.Delete(ctx, obj, opts...) 
+ if err != nil { + return err + } + return nil +} + +// CreateOrUpdate does not support k8sclient.CreateOption or k8sclient.UpdateOption being passed as variadic parameters, +// if want to use opts use Create or Update methods +func (c *ClientImpl) CreateOrUpdate(ctx context.Context, obj k8sclient.Object, opts ...CreateOrUpdateOption) error { + if opts != nil && len(opts) > 0 { + if err := setControllerReference(opts[0].Owner, obj, opts[0].Scheme); err != nil { + return err + } + } + + c.GlobalConfig.Apply(obj) + + err := c.k8sClient.Create(ctx, obj) + if err != nil { + if k8serrors.IsAlreadyExists(err) { + return c.k8sClient.Update(ctx, obj) + } + return err + } + return nil +} + +func setControllerReference(owner v1.Object, obj v1.Object, scheme *runtime.Scheme) error { + if owner != nil && obj != nil && scheme != nil { + err := controllerutil.SetControllerReference(owner, obj, scheme) + if err != nil { + if _, ok := err.(*controllerutil.AlreadyOwnedError); ok { + return nil + } + return errors.Wrap(err, "controller reference error") + } + } + + return nil +} diff --git a/pkg/k8s/controllerclient/client_structs.go b/pkg/k8s/controllerclient/client_structs.go new file mode 100644 index 00000000..0529e983 --- /dev/null +++ b/pkg/k8s/controllerclient/client_structs.go @@ -0,0 +1,58 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package controllerclient + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + k8sclient "sigs.k8s.io/controller-runtime/pkg/client" +) + +// type Object interface { +// runtime.Object +// v1.Object +// } + +type CreateOption struct { + Owner v1.Object + Scheme *runtime.Scheme + Opts []k8sclient.CreateOption +} + +type PatchOption struct { + Resilient *ResilientPatch + Opts []k8sclient.PatchOption +} + +type ResilientPatch struct { + Retry int + Into k8sclient.Object + Strategy func(k8sclient.Object) k8sclient.Patch +} + +type UpdateOption struct { + Owner v1.Object + Scheme *runtime.Scheme + Opts []k8sclient.UpdateOption +} + +type CreateOrUpdateOption struct { + Owner v1.Object + Scheme *runtime.Scheme +} diff --git a/pkg/manager/resources/configmap/configmap_suite_test.go b/pkg/manager/resources/configmap/configmap_suite_test.go new file mode 100644 index 00000000..6656157b --- /dev/null +++ b/pkg/manager/resources/configmap/configmap_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package configmap_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestConfigmap(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Configmap Suite") +} diff --git a/pkg/manager/resources/configmap/manager.go b/pkg/manager/resources/configmap/manager.go new file mode 100644 index 00000000..6cc278a1 --- /dev/null +++ b/pkg/manager/resources/configmap/manager.go @@ -0,0 +1,189 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package configmap + +import ( + "context" + "fmt" + + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/operatorerrors" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var log = logf.Log.WithName("configmap_manager") + +type Manager struct { + Client k8sclient.Client + Scheme *runtime.Scheme + ConfigMapFile string + Name string + Options map[string]interface{} + + LabelsFunc func(v1.Object) map[string]string + OverrideFunc func(v1.Object, *corev1.ConfigMap, resources.Action, map[string]interface{}) error +} + +func (m *Manager) GetName(instance v1.Object) string { + if m.Name != "" { + return fmt.Sprintf("%s-%s", instance.GetName(), m.Name) + } + return fmt.Sprintf("%s", instance.GetName()) +} + +func (m *Manager) Reconcile(instance v1.Object, update bool) error { + name := m.GetName(instance) + configMap := &corev1.ConfigMap{} + err := m.Client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: instance.GetNamespace()}, configMap) + if err != nil { + if k8serrors.IsNotFound(err) { + log.Info(fmt.Sprintf("Creating configmap '%s'", name)) + configMap, err := m.GetConfigMapBasedOnCRFromFile(instance) + if err != nil { + return err + } + + err = m.Client.Create(context.TODO(), configMap, k8sclient.CreateOption{ + Owner: instance, + Scheme: m.Scheme, + }) + if err != nil { + return err + } + return nil + } + return err + } + + if update { + if m.OverrideFunc != nil { + log.Info(fmt.Sprintf("Updating configmap '%s'", name)) + err := m.OverrideFunc(instance, configMap, resources.Update, m.Options) + if err != 
nil { + return err + } + + err = m.Client.Update(context.TODO(), configMap, k8sclient.UpdateOption{ + Owner: instance, + Scheme: m.Scheme, + }) + if err != nil { + return err + } + return nil + } + } + + // TODO: If needed, update logic for servie goes here + + return nil +} + +func (m *Manager) GetConfigMapBasedOnCRFromFile(instance v1.Object) (*corev1.ConfigMap, error) { + configMap, err := util.GetConfigMapFromFile(m.ConfigMapFile) + if err != nil { + log.Error(err, fmt.Sprintf("Error reading configmap configuration file: %s", m.ConfigMapFile)) + return nil, err + } + + configMap.Name = m.GetName(instance) + configMap.Namespace = instance.GetNamespace() + configMap.Labels = m.LabelsFunc(instance) + + return m.BasedOnCR(instance, configMap) +} + +func (m *Manager) BasedOnCR(instance v1.Object, configMap *corev1.ConfigMap) (*corev1.ConfigMap, error) { + if m.OverrideFunc != nil { + err := m.OverrideFunc(instance, configMap, resources.Create, m.Options) + if err != nil { + return nil, operatorerrors.New(operatorerrors.InvalidConfigMapCreateRequest, err.Error()) + } + } + + return configMap, nil +} + +func (m *Manager) Get(instance v1.Object) (client.Object, error) { + if instance == nil { + return nil, nil // Instance has not been reconciled yet + } + + name := m.GetName(instance) + cm := &corev1.ConfigMap{} + err := m.Client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: instance.GetNamespace()}, cm) + if err != nil { + return nil, err + } + + return cm, nil +} + +func (m *Manager) Exists(instance v1.Object) bool { + _, err := m.Get(instance) + if err != nil { + return false + } + + return true +} + +func (m *Manager) Delete(instance v1.Object) error { + cm, err := m.Get(instance) + if err != nil { + if !k8serrors.IsNotFound(err) { + return err + } + } + + if cm == nil { + return nil + } + + err = m.Client.Delete(context.TODO(), cm) + if err != nil { + if !k8serrors.IsNotFound(err) { + return err + } + } + + return nil +} + +func (m *Manager) CheckState(instance v1.Object) error { + // NO-OP + return nil +} + +func (m *Manager) RestoreState(instance v1.Object) error { + // NO-OP + return nil +} + +func (m *Manager) SetCustomName(name string) { + // NO-OP +} diff --git a/pkg/manager/resources/configmap/manager_test.go b/pkg/manager/resources/configmap/manager_test.go new file mode 100644 index 00000000..38cc2810 --- /dev/null +++ b/pkg/manager/resources/configmap/manager_test.go @@ -0,0 +1,106 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package configmap_test + +import ( + "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/configmap" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + k8serror "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var _ = Describe("ConfigMap manager", func() { + var ( + mockKubeClient *mocks.Client + manager *configmap.Manager + instance metav1.Object + ) + + BeforeEach(func() { + mockKubeClient = &mocks.Client{} + manager = &configmap.Manager{ + ConfigMapFile: "../../../../definitions/console/console-configmap.yaml", + Client: mockKubeClient, + OverrideFunc: func(v1.Object, *corev1.ConfigMap, resources.Action, map[string]interface{}) error { + return nil + }, + LabelsFunc: func(v1.Object) map[string]string { + return map[string]string{} + }, + } + + instance = &metav1.ObjectMeta{} + }) + + Context("reconciles the configmap instance", func() { + It("does not try to create configmap if the get request returns an error other than 'not found'", func() { + errMsg := "connection refused" + mockKubeClient.GetReturns(errors.New(errMsg)) + err := manager.Reconcile(instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(errMsg)) + }) + + When("configmap does not exist", func() { + BeforeEach(func() { + notFoundErr := &k8serror.StatusError{ + ErrStatus: metav1.Status{ + Reason: metav1.StatusReasonNotFound, + }, + } + mockKubeClient.GetReturns(notFoundErr) + }) + + It("returns an error if fails to load default config", func() { + manager.ConfigMapFile = "bad.yaml" + err := manager.Reconcile(instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("no such file or directory")) + }) + + It("returns an error if override configmap value fails", func() { + manager.OverrideFunc = func(v1.Object, *corev1.ConfigMap, resources.Action, map[string]interface{}) error { + return errors.New("creation override failed") + } + err := manager.Reconcile(instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("creation override failed")) + }) + + It("returns an error if the creation of the ConfigMap fails", func() { + errMsg := "unable to create configmap" + mockKubeClient.CreateReturns(errors.New(errMsg)) + err := manager.Reconcile(instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(errMsg)) + }) + + It("does not return an error on a successfull ConfigMap creation", func() { + err := manager.Reconcile(instance, false) + Expect(err).NotTo(HaveOccurred()) + }) + }) + }) +}) diff --git a/pkg/manager/resources/container/container.go b/pkg/manager/resources/container/container.go new file mode 100644 index 00000000..e1a49ae5 --- /dev/null +++ b/pkg/manager/resources/container/container.go @@ -0,0 +1,237 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package container + +import ( + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + "github.com/IBM-Blockchain/fabric-operator/pkg/util/image" + "github.com/pkg/errors" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" +) + +type SecurityContext struct { + Privileged *bool + RunAsNonRoot *bool + RunAsUser *int64 + AllowPrivilegeEscalation *bool +} + +func New(container *corev1.Container) *Container { + return &Container{container} +} + +func LoadFromDeployment(deployment *appsv1.Deployment) map[string]Container { + containers := map[string]Container{} + for i, c := range deployment.Spec.Template.Spec.Containers { + containers[c.Name] = Container{&deployment.Spec.Template.Spec.Containers[i]} + } + for i, c := range deployment.Spec.Template.Spec.InitContainers { + containers[c.Name] = Container{&deployment.Spec.Template.Spec.InitContainers[i]} + } + return containers +} + +func LoadFromFile(file string) (*Container, error) { + container, err := util.GetContainerFromFile(file) + if err != nil { + return nil, errors.Wrap(err, "failed to read container file") + } + return &Container{container}, nil +} + +type Container struct { + *corev1.Container +} + +func (c *Container) DeleteEnv(name string) { + newEnvs := []corev1.EnvVar{} + for _, env := range c.Env { + if env.Name == name { + continue + } + newEnvs = append(newEnvs, env) + } + + c.Env = newEnvs +} + +func (c *Container) UpdateEnv(name, value string) { + var updated bool + + newEnvs := []corev1.EnvVar{} + for _, env := range c.Env { + if env.Name == name { + env.Value = value + updated = true + } + newEnvs = append(newEnvs, env) + } + + if updated { + c.Env = newEnvs + } else { + c.Env = append(newEnvs, corev1.EnvVar{Name: name, Value: value}) + } +} + +func (c *Container) AppendEnvStructIfMissing(envVar corev1.EnvVar) { + c.Env = util.AppendEnvIfMissing(c.Env, envVar) +} + +func (c *Container) AppendEnvIfMissing(name, value string) { + envVar := corev1.EnvVar{ + Name: name, + Value: value, + } + c.Env = util.AppendEnvIfMissing(c.Env, envVar) +} + +func (c *Container) AppendEnvIfMissingOverrideIfPresent(name, value string) { + envVar := corev1.EnvVar{ + Name: name, + Value: value, + } + c.Env = util.AppendEnvIfMissingOverrideIfPresent(c.Env, envVar) +} + +func (c *Container) SetImage(img, tag string) { + if img != "" { + if tag != "" { + c.Container.Image = image.Format(img, tag) + } else { + c.Container.Image = img + ":latest" + } + } +} + +func (c *Container) AppendVolumeMountStructIfMissing(volumeMount corev1.VolumeMount) { + c.VolumeMounts = util.AppendVolumeMountIfMissing(c.VolumeMounts, volumeMount) +} + +func (c *Container) AppendVolumeMountIfMissing(name, mountPath string) { + volumeMount := corev1.VolumeMount{ + Name: name, + MountPath: mountPath, + } + c.VolumeMounts = util.AppendVolumeMountIfMissing(c.VolumeMounts, volumeMount) +} + +func (c *Container) AppendVolumeMountWithSubPathIfMissing(name, mountPath, subPath string) { + volumeMount := corev1.VolumeMount{ + Name: name, + MountPath: mountPath, + SubPath: subPath, + } + c.VolumeMounts = util.AppendVolumeMountWithSubPathIfMissing(c.VolumeMounts, volumeMount) +} + +func (c *Container) AppendVolumeMountWithSubPath(name, mountPath, subPath string) { + volumeMount := corev1.VolumeMount{ + Name: name, + MountPath: mountPath, + SubPath: subPath, + } + c.VolumeMounts = append(c.VolumeMounts, volumeMount) +} + +func (c *Container) SetVolumeMounts(volumeMounts []corev1.VolumeMount) { + c.VolumeMounts = volumeMounts +} + +func (c *Container) UpdateResources(new 
*corev1.ResourceRequirements) error { + current := &c.Resources + update, err := util.GetResourcePatch(current, new) + if err != nil { + return errors.Wrap(err, "failed to get resource patch") + } + + c.Resources = *update + return nil +} + +func (c *Container) SetCommand(command []string) { + c.Command = command +} + +func (c *Container) SetArgs(args []string) { + c.Args = args +} + +func (c *Container) AppendConfigMapFromSourceIfMissing(name string) { + envFrom := corev1.EnvFromSource{ + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: name, + }, + }, + } + c.EnvFrom = util.AppendConfigMapFromSourceIfMissing(c.Container.EnvFrom, envFrom) +} + +func (c *Container) AppendEnvVarValueFromIfMissing(name string, valueFrom *corev1.EnvVarSource) { + envVar := corev1.EnvVar{ + Name: name, + ValueFrom: valueFrom, + } + c.Env = util.AppendEnvIfMissing(c.Container.Env, envVar) +} + +func (c *Container) GetEnvs(reqs []string) []corev1.EnvVar { + envVars := []corev1.EnvVar{} + + for _, env := range c.Env { + for _, req := range reqs { + if env.Name == req { + envVars = append(envVars, env) + } + } + } + + return envVars +} + +// UpdateSecurityContext will update the security context of the container +func (c *Container) UpdateSecurityContext(sc SecurityContext) { + UpdateSecurityContext(c.Container, sc) +} + +// UpdateSecurityContext will update the security context of passed in container +func UpdateSecurityContext(c *corev1.Container, sc SecurityContext) { + if c.SecurityContext == nil { + c.SecurityContext = &corev1.SecurityContext{} + } + + c.SecurityContext.Privileged = sc.Privileged + c.SecurityContext.RunAsNonRoot = sc.RunAsNonRoot + c.SecurityContext.RunAsUser = sc.RunAsUser + c.SecurityContext.AllowPrivilegeEscalation = sc.AllowPrivilegeEscalation +} + +func (c *Container) SetReadinessProbe(probe *corev1.Probe) { + c.ReadinessProbe = probe +} + +func (c *Container) SetLivenessProbe(probe *corev1.Probe) { + c.LivenessProbe = probe +} + +func (c *Container) SetStartupProbe(probe *corev1.Probe) { + c.StartupProbe = probe +} diff --git a/pkg/manager/resources/container/container_suite_test.go b/pkg/manager/resources/container/container_suite_test.go new file mode 100644 index 00000000..effd7354 --- /dev/null +++ b/pkg/manager/resources/container/container_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package container_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +func TestContainer(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Container Suite") +} diff --git a/pkg/manager/resources/container/container_test.go b/pkg/manager/resources/container/container_test.go new file mode 100644 index 00000000..91b485e2 --- /dev/null +++ b/pkg/manager/resources/container/container_test.go @@ -0,0 +1,66 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package container_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/container" + + corev1 "k8s.io/api/core/v1" +) + +var _ = Describe("container", func() { + var ( + cont *container.Container + ) + + BeforeEach(func() { + cont = &container.Container{ + Container: &corev1.Container{}, + } + }) + + Context("env vars", func() { + BeforeEach(func() { + cont.Env = []corev1.EnvVar{ + corev1.EnvVar{ + Name: "env1", + Value: "1.0", + }, + } + + Expect(cont.Env).To(ContainElement(corev1.EnvVar{Name: "env1", Value: "1.0"})) + }) + + It("updates", func() { + cont.UpdateEnv("env1", "1.1") + Expect(len(cont.Env)).To(Equal(1)) + Expect(cont.Env).To(ContainElement(corev1.EnvVar{Name: "env1", Value: "1.1"})) + }) + }) + + Context("set image", func() { + It("parses sha tags", func() { + cont.SetImage("ibp-peer", "sha256:12345") + Expect(cont.Image).To(Equal("ibp-peer@sha256:12345")) + }) + }) +}) diff --git a/pkg/manager/resources/deployment/deployment.go b/pkg/manager/resources/deployment/deployment.go new file mode 100644 index 00000000..d131c967 --- /dev/null +++ b/pkg/manager/resources/deployment/deployment.go @@ -0,0 +1,237 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package deployment + +import ( + "fmt" + + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/container" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" +) + +func New(deployment *v1.Deployment) *Deployment { + return &Deployment{ + Deployment: deployment, + } +} + +type Deployment struct { + *v1.Deployment +} + +func (d *Deployment) RemoveContainer(name string) { + for i, c := range d.Deployment.Spec.Template.Spec.Containers { + if c.Name == name { + if i == len(d.Deployment.Spec.Template.Spec.Containers)-1 { + d.Deployment.Spec.Template.Spec.Containers = d.Deployment.Spec.Template.Spec.Containers[:len(d.Deployment.Spec.Template.Spec.Containers)-1] + return + } + + d.Deployment.Spec.Template.Spec.Containers = append( + d.Deployment.Spec.Template.Spec.Containers[:i], + d.Deployment.Spec.Template.Spec.Containers[i+1:]...) + return + } + } +} + +func (d *Deployment) UpdateContainer(update container.Container) { + for i, c := range d.Deployment.Spec.Template.Spec.Containers { + if c.Name == update.Name { + d.Deployment.Spec.Template.Spec.Containers[i] = *update.Container + return + } + } +} + +func (d *Deployment) UpdateInitContainer(update container.Container) { + for i, c := range d.Deployment.Spec.Template.Spec.InitContainers { + if c.Name == update.Name { + d.Deployment.Spec.Template.Spec.InitContainers[i] = *update.Container + return + } + } +} + +func (d *Deployment) AddContainer(add container.Container) { + d.Deployment.Spec.Template.Spec.Containers = util.AppendContainerIfMissing(d.Deployment.Spec.Template.Spec.Containers, *add.Container) +} + +func (d *Deployment) AddInitContainer(add container.Container) { + d.Deployment.Spec.Template.Spec.InitContainers = util.AppendContainerIfMissing(d.Deployment.Spec.Template.Spec.InitContainers, *add.Container) +} + +func (d *Deployment) ContainerNames() []string { + names := []string{} + for _, c := range d.Deployment.Spec.Template.Spec.Containers { + names = append(names, c.Name) + } + for _, c := range d.Deployment.Spec.Template.Spec.InitContainers { + names = append(names, c.Name) + } + return names +} + +func (d *Deployment) GetContainers() map[string]container.Container { + return container.LoadFromDeployment(d.Deployment) +} + +func (d *Deployment) MustGetContainer(name string) container.Container { + cont, _ := d.GetContainer(name) + return cont +} + +func (d *Deployment) GetContainer(name string) (cont container.Container, err error) { + for i, c := range d.Deployment.Spec.Template.Spec.Containers { + if c.Name == name { + cont = container.Container{Container: &d.Deployment.Spec.Template.Spec.Containers[i]} + return + } + } + for i, c := range d.Deployment.Spec.Template.Spec.InitContainers { + if c.Name == name { + cont = container.Container{Container: &d.Deployment.Spec.Template.Spec.InitContainers[i]} + return + } + } + return cont, fmt.Errorf("container '%s' not found", name) +} + +func (d *Deployment) ContainerExists(name string) bool { + _, found := d.GetContainers()[name] + return found +} + +func (d *Deployment) SetServiceAccountName(name string) { + d.Deployment.Spec.Template.Spec.ServiceAccountName = name +} + +func (d *Deployment) SetImagePullSecrets(pullSecrets []string) { + if pullSecrets != nil && len(pullSecrets) > 0 { + d.Deployment.Spec.Template.Spec.ImagePullSecrets = []corev1.LocalObjectReference{} + + for _, pullSecret := range pullSecrets { + imagePullSecret := corev1.LocalObjectReference{ + Name: 
pullSecret, + } + d.Deployment.Spec.Template.Spec.ImagePullSecrets = util.AppendImagePullSecretIfMissing(d.Deployment.Spec.Template.Spec.ImagePullSecrets, imagePullSecret) + } + } +} + +func (d *Deployment) AppendPullSecret(imagePullSecret corev1.LocalObjectReference) { + d.Deployment.Spec.Template.Spec.ImagePullSecrets = util.AppendImagePullSecretIfMissing(d.Deployment.Spec.Template.Spec.ImagePullSecrets, imagePullSecret) +} + +func (d *Deployment) AppendVolumeIfMissing(volume corev1.Volume) { + d.Deployment.Spec.Template.Spec.Volumes = util.AppendVolumeIfMissing(d.Deployment.Spec.Template.Spec.Volumes, volume) +} + +func (d *Deployment) AppendPVCVolumeIfMissing(name, claimName string) { + volume := corev1.Volume{ + Name: name, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: claimName, + }, + }, + } + d.AppendVolumeIfMissing(volume) +} + +func (d *Deployment) AppendConfigMapVolumeIfMissing(name, localObjReferenceName string) { + volume := corev1.Volume{ + Name: name, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: localObjReferenceName, + }, + }, + }, + } + d.AppendVolumeIfMissing(volume) +} + +func (d *Deployment) AppendSecretVolumeIfMissing(name, secretName string) { + volume := corev1.Volume{ + Name: name, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: secretName, + }, + }, + } + d.AppendVolumeIfMissing(volume) +} + +func (d *Deployment) AppendEmptyDirVolumeIfMissing(name string, storageMedium corev1.StorageMedium) { + volume := corev1.Volume{ + Name: name, + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{ + Medium: storageMedium, + }, + }, + } + d.AppendVolumeIfMissing(volume) +} + +func (d *Deployment) AppendHostPathVolumeIfMissing(name, hostPath string, hostPathType corev1.HostPathType) { + volume := corev1.Volume{ + Name: name, + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: hostPath, + Type: &hostPathType, + }, + }, + } + d.AppendVolumeIfMissing(volume) +} + +func (d *Deployment) SetAffinity(affinity *corev1.Affinity) { + d.Deployment.Spec.Template.Spec.Affinity = affinity +} + +func (d *Deployment) SetReplicas(replicas *int32) { + d.Deployment.Spec.Replicas = replicas +} + +func (d *Deployment) SetStrategy(strategyType appsv1.DeploymentStrategyType) { + strategy := appsv1.DeploymentStrategy{ + Type: appsv1.RollingUpdateDeploymentStrategyType, + } + d.Deployment.Spec.Strategy = strategy +} + +// UpdateSecurityContextForAllContainers updates the security context for all containers defined +// in the deployment +func (d *Deployment) UpdateSecurityContextForAllContainers(sc container.SecurityContext) { + for i := range d.Spec.Template.Spec.InitContainers { + container.UpdateSecurityContext(&d.Spec.Template.Spec.InitContainers[i], sc) + } + + for i := range d.Spec.Template.Spec.Containers { + container.UpdateSecurityContext(&d.Spec.Template.Spec.Containers[i], sc) + } +} diff --git a/pkg/manager/resources/deployment/deployment_suite_test.go b/pkg/manager/resources/deployment/deployment_suite_test.go new file mode 100644 index 00000000..fbff7fd4 --- /dev/null +++ b/pkg/manager/resources/deployment/deployment_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the 
"License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package deployment_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestDeployment(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Deployment Suite") +} diff --git a/pkg/manager/resources/deployment/manager.go b/pkg/manager/resources/deployment/manager.go new file mode 100644 index 00000000..fa263332 --- /dev/null +++ b/pkg/manager/resources/deployment/manager.go @@ -0,0 +1,419 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package deployment + +import ( + "context" + "fmt" + "os" + "regexp" + + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/operatorerrors" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + "github.com/go-test/deep" + "github.com/pkg/errors" + appsv1 "k8s.io/api/apps/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var log = logf.Log.WithName("deployment_manager") + +type Manager struct { + Client k8sclient.Client + Scheme *runtime.Scheme + DeploymentFile string + IgnoreDifferences []string + Name string + + LabelsFunc func(v1.Object) map[string]string + OverrideFunc func(v1.Object, *appsv1.Deployment, resources.Action) error +} + +func (m *Manager) GetName(instance v1.Object) string { + return GetName(instance.GetName(), m.Name) +} + +func (m *Manager) Reconcile(instance v1.Object, update bool) error { + name := m.GetName(instance) + + deployment := &appsv1.Deployment{} + err := m.Client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: instance.GetNamespace()}, deployment) + if err != nil { + if k8serrors.IsNotFound(err) { + log.Info(fmt.Sprintf("Creating deployment '%s'", name)) + deployment, err := m.GetDeploymentBasedOnCRFromFile(instance) + if err != nil { + return err + } + + err = m.Client.Create(context.TODO(), deployment, k8sclient.CreateOption{ + Owner: instance, + Scheme: m.Scheme, + }) + if err != nil { + return err + } + return nil + } + return err + } + + if update { + log.Info(fmt.Sprintf("Updating deployment '%s'", name)) + err 
= m.OverrideFunc(instance, deployment, resources.Update) + if err != nil { + return operatorerrors.New(operatorerrors.InvalidDeploymentUpdateRequest, err.Error()) + } + + err = m.Client.Patch(context.TODO(), deployment, nil, k8sclient.PatchOption{ + Resilient: &k8sclient.ResilientPatch{ + Retry: 3, + Into: &appsv1.Deployment{}, + Strategy: client.MergeFrom, + }, + }) + if err != nil { + return err + } + + // Wait for deployment to get updated before returning + + // TODO: Currently commented this out because with the rolling updates (i.e. for console), + // it takes longer to wait for the new pod to come up and be running and for the + // old pod to then terminate. Need to figure out how to resolve this. + // err := wait.Poll(500*time.Millisecond, 30*time.Second, func() (bool, error) { + // upToDate := m.DeploymentIsUpToDate(instance) + // if upToDate { + // return true, nil + // } + // return false, nil + // }) + // if err != nil { + // return errors.Wrap(err, "failed to determine if deployment was updated") + // } + } + + return nil +} + +func (m *Manager) GetDeploymentBasedOnCRFromFile(instance v1.Object) (*appsv1.Deployment, error) { + deployment, err := util.GetDeploymentFromFile(m.DeploymentFile) + if err != nil { + log.Error(err, fmt.Sprintf("Error reading deployment configuration file: %s", m.DeploymentFile)) + return nil, err + } + + return m.BasedOnCR(instance, deployment) +} + +func (m *Manager) CheckForSecretChange(instance v1.Object, secretName string, restartFunc func(string, *appsv1.Deployment) bool) error { + name := m.GetName(instance) + + deployment := &appsv1.Deployment{} + err := m.Client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: instance.GetNamespace()}, deployment) + if err != nil { + if k8serrors.IsNotFound(err) { + return nil + } + return err + } + + rv, err := util.GetResourceVerFromSecret(m.Client, secretName, instance.GetNamespace()) + if err == nil && rv != "" { + // Only if secret change is detected do we update deployment env var with new resource version + changed := restartFunc(rv, deployment) + if changed { + log.Info(fmt.Sprintf("Secret '%s' update detected, triggering deployment restart for peer '%s'", secretName, instance.GetName())) + err = m.Client.Update(context.TODO(), deployment) + if err != nil { + return errors.Wrap(err, "failed to update deployment with secret resource version") + } + } + } + + return nil + +} + +func (m *Manager) BasedOnCR(instance v1.Object, deployment *appsv1.Deployment) (*appsv1.Deployment, error) { + if m.OverrideFunc != nil { + err := m.OverrideFunc(instance, deployment, resources.Create) + if err != nil { + return nil, operatorerrors.New(operatorerrors.InvalidDeploymentCreateRequest, err.Error()) + } + } + + deployment.Name = m.GetName(instance) + deployment.Namespace = instance.GetNamespace() + requiredLabels := m.LabelsFunc(instance) + labels := deployment.Labels + if len(labels) == 0 { + labels = make(map[string]string) + } + for requiredKey, requiredElement := range requiredLabels { + labels[requiredKey] = requiredElement + } + deployment.Labels = labels + deployment.Spec.Template.Labels = labels + deployment.Spec.Selector = &metav1.LabelSelector{ + MatchLabels: m.getSelectorLabels(instance), + } + + return deployment, nil +} + +func (m *Manager) CheckState(instance v1.Object) error { + if instance == nil { + return nil // Instance has not been reconciled yet + } + + name := m.GetName(instance) + + // Get the latest version of the instance + deployment := &appsv1.Deployment{} + err := 
m.Client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: instance.GetNamespace()}, deployment) + if err != nil { + return nil + } + + copy := deployment.DeepCopy() + expectedDeployment, err := m.BasedOnCR(instance, copy) + if err != nil { + return err + } + + deep.MaxDepth = 20 + deep.MaxDiff = 30 + deep.CompareUnexportedFields = true + deep.LogErrors = true + + if os.Getenv("OPERATOR_DEBUG_DISABLEDEPLOYMENTCHECKS") == "true" { + return nil + } + + diff := deep.Equal(deployment.Spec, expectedDeployment.Spec) + if diff != nil { + err := m.ignoreDifferences(diff) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("deployment (%s) has been edited manually, and does not match what is expected based on the CR", deployment.GetName())) + } + } + + return nil +} + +func (m *Manager) RestoreState(instance v1.Object) error { + if instance == nil { + return nil // Instance has not been reconciled yet + } + + name := m.GetName(instance) + deployment := &appsv1.Deployment{} + err := m.Client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: instance.GetNamespace()}, deployment) + if err != nil { + return nil + } + + deployment, err = m.BasedOnCR(instance, deployment) + if err != nil { + return err + } + + err = m.Client.Patch(context.TODO(), deployment, nil, k8sclient.PatchOption{ + Resilient: &k8sclient.ResilientPatch{ + Retry: 2, + Into: &appsv1.Deployment{}, + Strategy: client.MergeFrom, + }, + }) + if err != nil { + return err + } + + return nil +} + +func (m *Manager) Get(instance v1.Object) (client.Object, error) { + if instance == nil { + return nil, nil // Instance has not been reconciled yet + } + + name := m.GetName(instance) + deployment := &appsv1.Deployment{} + err := m.Client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: instance.GetNamespace()}, deployment) + if err != nil { + return nil, err + } + + return deployment, nil +} + +func (m *Manager) Exists(instance v1.Object) bool { + dep, err := m.Get(instance) + if err != nil || dep == nil { + return false + } + + return true +} + +func (m *Manager) Delete(instance v1.Object) error { + dep, err := m.Get(instance) + if err != nil { + if !k8serrors.IsNotFound(err) { + return err + } + } + + if dep == nil { + return nil + } + + err = m.Client.Delete(context.TODO(), dep) + if err != nil { + if !k8serrors.IsNotFound(err) { + return err + } + } + + return nil +} + +func (m *Manager) getSelectorLabels(instance v1.Object) map[string]string { + return map[string]string{ + "app": instance.GetName(), + } +} + +func (m *Manager) ignoreDifferences(diff []string) error { + diffs := []string{} + for _, d := range diff { + found := false + for _, i := range m.differenceToIgnore() { + regex := regexp.MustCompile(i) + found = regex.MatchString(d) + if found { + break + } + } + if !found { + diffs = append(diffs, d) + return fmt.Errorf("unexpected mismatch: %s", d) + } + } + return nil +} + +func (m *Manager) differenceToIgnore() []string { + d := []string{ + "TypeMeta", "ObjectMeta", + "VolumeSource.Secret.DefaultMode", + "VolumeSource.ConfigMap.DefaultMode", + "TerminationMessagePath", + "TerminationMessagePolicy", + "SecurityContext.ProcMount", + "Template.Spec.TerminationGracePeriodSeconds", + "Template.Spec.DNSPolicy", + "Template.Spec.DeprecatedServiceAccount", + "Template.Spec.SchedulerName", + "RevisionHistoryLimit", + "RestartPolicy", + "ProgressDeadlineSeconds", + "LivenessProbe.SuccessThreshold", + "LivenessProbe.FailureThreshold", + "LivenessProbe.InitialDelaySeconds", + 
"LivenessProbe.PeriodSeconds", + "LivenessProbe.TimeoutSeconds", + "ReadinessProbe.SuccessThreshold", + "ReadinessProbe.FailureThreshold", + "ReadinessProbe.InitialDelaySeconds", + "ReadinessProbe.PeriodSeconds", + "ReadinessProbe.TimeoutSeconds", + "StartupProbe.SuccessThreshold", + "StartupProbe.FailureThreshold", + "StartupProbe.InitialDelaySeconds", + "StartupProbe.PeriodSeconds", + "StartupProbe.TimeoutSeconds", + "ValueFrom.FieldRef.APIVersion", + "Template.Spec.Affinity", + "Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms", + "Strategy.RollingUpdate", + } + d = append(d, m.IgnoreDifferences...) + return d +} + +func (m *Manager) DeploymentIsUpToDate(instance v1.Object) bool { + deployment := &appsv1.Deployment{} + err := m.Client.Get( + context.TODO(), + types.NamespacedName{Name: m.GetName(instance), Namespace: instance.GetNamespace()}, + deployment, + ) + if err != nil { + return false + } + + if deployment.Status.Replicas > 0 { + if deployment.Status.Replicas != deployment.Status.UpdatedReplicas { + return false + } + } + + return true +} + +func (m *Manager) DeploymentStatus(instance v1.Object) (appsv1.DeploymentStatus, error) { + deployment := &appsv1.Deployment{} + err := m.Client.Get( + context.TODO(), + types.NamespacedName{Name: m.GetName(instance), Namespace: instance.GetNamespace()}, + deployment, + ) + if err != nil { + return appsv1.DeploymentStatus{}, err + } + + return deployment.Status, nil +} + +func (m *Manager) SetCustomName(name string) { + // NO-OP +} + +func (m *Manager) GetScheme() *runtime.Scheme { + return m.Scheme +} + +func GetName(instanceName string, suffix ...string) string { + if len(suffix) != 0 { + if suffix[0] != "" { + return fmt.Sprintf("%s-%s", instanceName, suffix[0]) + } + } + return fmt.Sprintf("%s", instanceName) +} diff --git a/pkg/manager/resources/deployment/manager_test.go b/pkg/manager/resources/deployment/manager_test.go new file mode 100644 index 00000000..435dc4bd --- /dev/null +++ b/pkg/manager/resources/deployment/manager_test.go @@ -0,0 +1,205 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package deployment_test + +import ( + "context" + + "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/deployment" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/pkg/errors" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + k8serror "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = Describe("Deployment manager", func() { + var ( + mockKubeClient *mocks.Client + manager *deployment.Manager + instance metav1.Object + ) + + BeforeEach(func() { + mockKubeClient = &mocks.Client{} + mockKubeClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj client.Object) error { + switch obj.(type) { + case *appsv1.Deployment: + o := obj.(*appsv1.Deployment) + manager.BasedOnCR(instance, o) + o.Status.Replicas = 1 + o.Status.UpdatedReplicas = 1 + } + return nil + } + + manager = &deployment.Manager{ + DeploymentFile: "../../../../definitions/ca/deployment.yaml", + Client: mockKubeClient, + OverrideFunc: func(object v1.Object, d *appsv1.Deployment, action resources.Action) error { + d.Spec = appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{corev1.Container{ + Name: "container", + }}, + }, + }, + } + return nil + }, + LabelsFunc: func(v1.Object) map[string]string { + return map[string]string{} + }, + } + + instance = &metav1.ObjectMeta{} + }) + + Context("reconciles the deployment instance", func() { + It("does not try to create deployment if the get request returns an error other than 'not found'", func() { + errMsg := "connection refused" + mockKubeClient.GetReturns(errors.New(errMsg)) + err := manager.Reconcile(instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(errMsg)) + }) + + When("deployment does not exist", func() { + BeforeEach(func() { + notFoundErr := &k8serror.StatusError{ + ErrStatus: metav1.Status{ + Reason: metav1.StatusReasonNotFound, + }, + } + mockKubeClient.GetReturns(notFoundErr) + }) + + It("returns an error if fails to load default config", func() { + manager.DeploymentFile = "bad.yaml" + err := manager.Reconcile(instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("no such file or directory")) + }) + + It("returns an error if override deployment value fails", func() { + manager.OverrideFunc = func(v1.Object, *appsv1.Deployment, resources.Action) error { + return errors.New("creation override failed") + } + err := manager.Reconcile(instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("creation override failed")) + }) + + It("returns an error if the creation of the Deployment fails", func() { + errMsg := "unable to create service" + mockKubeClient.CreateReturns(errors.New(errMsg)) + err := manager.Reconcile(instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(errMsg)) + }) + + It("does not return an error on a successfull Deployment creation", func() { + err := manager.Reconcile(instance, false) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + When("deployment already exists", func() { + It("returns an error if override deployment value fails", func() { + manager.OverrideFunc = func(v1.Object, *appsv1.Deployment, resources.Action) error { + return errors.New("update override failed") + } + err := manager.Reconcile(instance, true) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("update override failed")) + }) + + It("returns an error if the updating of Deployment fails", 
func() { + errMsg := "unable to update deployment" + mockKubeClient.PatchReturns(errors.New(errMsg)) + err := manager.Reconcile(instance, true) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(errMsg)) + }) + + It("does not return an error on a successfull Deployment update", func() { + err := manager.Reconcile(instance, true) + Expect(err).NotTo(HaveOccurred()) + }) + }) + }) + + Context("check deployment state", func() { + It("returns an error if an unexpected change in deployment is detected", func() { + dep := &appsv1.Deployment{} + + mockKubeClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj client.Object) error { + switch obj.(type) { + case *appsv1.Deployment: + dep = obj.(*appsv1.Deployment) + dep.Spec = appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": ""}, + }, + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{corev1.Container{ + Name: "test-container", + }}, + }, + }, + } + } + return nil + } + + err := manager.CheckState(dep) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("deployment () has been edited manually, and does not match what is expected based on the CR: unexpected mismatch: Template.Spec.Containers.slice[0].Name: test-container != container")) + }) + + It("returns no error if no changes detected for deployment", func() { + err := manager.CheckState(&appsv1.Deployment{}) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("restore deployment state", func() { + It("returns an error if the restoring deployment state fails", func() { + errMsg := "unable to restore deployment" + mockKubeClient.PatchReturns(errors.New(errMsg)) + err := manager.RestoreState(&appsv1.Deployment{}) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(errMsg)) + }) + + It("returns no error if able to restore deployment state", func() { + err := manager.RestoreState(&appsv1.Deployment{}) + Expect(err).NotTo(HaveOccurred()) + }) + }) +}) diff --git a/pkg/manager/resources/ingress/ingress_suite_test.go b/pkg/manager/resources/ingress/ingress_suite_test.go new file mode 100644 index 00000000..aa7b356e --- /dev/null +++ b/pkg/manager/resources/ingress/ingress_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package ingress_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +func TestDeployment(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Deployment Suite") +} diff --git a/pkg/manager/resources/ingress/manager.go b/pkg/manager/resources/ingress/manager.go new file mode 100644 index 00000000..213790f5 --- /dev/null +++ b/pkg/manager/resources/ingress/manager.go @@ -0,0 +1,264 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package ingress + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + "github.com/pkg/errors" + networkingv1 "k8s.io/api/networking/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var log = logf.Log.WithName("ingress_manager") + +type Manager struct { + Client k8sclient.Client + Scheme *runtime.Scheme + IngressFile string + Suffix string + + LabelsFunc func(v1.Object) map[string]string + OverrideFunc func(v1.Object, *networkingv1.Ingress, resources.Action) error + + routeName string + Name string +} + +func (m *Manager) Reconcile(instance v1.Object, update bool) error { + name := instance.GetName() + if m.Suffix != "" { + name = fmt.Sprintf("%s-%s", instance.GetName(), m.Suffix) + } + ingress := &networkingv1.Ingress{} + err := m.Client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: instance.GetNamespace()}, ingress) + if err != nil { + if k8serrors.IsNotFound(err) { + log.Info(fmt.Sprintf("Creating ingress '%s'", name)) + ingress, err := m.GetIngressBasedOnCRFromFile(instance) + if err != nil { + return err + } + err = m.Client.Create(context.TODO(), ingress, k8sclient.CreateOption{Owner: instance, Scheme: m.Scheme}) + if err != nil { + return err + } + + err = m.UpdateIngressClassName(name, instance) + if err != nil { + log.Error(err, "Error updating ingress class name") + return err + } + + return nil + } + return err + } + + if update { + if m.OverrideFunc != nil { + log.Info(fmt.Sprintf("Updating ingress '%s'", name)) + err := m.OverrideFunc(instance, ingress, resources.Update) + if err != nil { + return err + } + + err = m.Client.Update(context.TODO(), ingress, k8sclient.UpdateOption{Owner: instance, Scheme: m.Scheme}) + if err != nil { + return err + } + + err = m.UpdateIngressClassName(name, instance) + if err != nil { + log.Error(err, "Error updating ingress class name") + return err + } + + return nil + } + } + + // TODO: If needed, update logic for servie goes here + + return nil +} + +func (m *Manager) Exists(instance v1.Object) bool { + if instance == nil { + return false // 
Instance has not been reconciled yet + } + + name := instance.GetName() + if m.Suffix != "" { + name = fmt.Sprintf("%s-%s", instance.GetName(), m.Suffix) + } + + ingress := &networkingv1.Ingress{} + err := m.Client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: instance.GetNamespace()}, ingress) + if err != nil { + return false + } + + return true +} + +func (m *Manager) Delete(instance v1.Object) error { + ingress, err := m.Get(instance) + if err != nil { + if !k8serrors.IsNotFound(err) { + return err + } + } + + if ingress == nil { + return nil + } + + err = m.Client.Delete(context.TODO(), ingress) + if err != nil { + if !k8serrors.IsNotFound(err) { + return err + } + } + + return nil +} + +func (m *Manager) Get(instance v1.Object) (client.Object, error) { + if instance == nil { + return nil, nil // Instance has not been reconciled yet + } + + name := m.GetName(instance) + ingress := &networkingv1.Ingress{} + err := m.Client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: instance.GetNamespace()}, ingress) + if err != nil { + return nil, err + } + + return ingress, nil +} + +func (m *Manager) GetName(instance v1.Object) string { + if m.Name != "" { + return fmt.Sprintf("%s-%s", instance.GetName(), m.Name) + } + return instance.GetName() +} + +func (m *Manager) GetIngressBasedOnCRFromFile(instance v1.Object) (*networkingv1.Ingress, error) { + ingress, err := util.GetIngressFromFile(m.IngressFile) + if err != nil { + log.Error(err, fmt.Sprintf("Error reading ingress configuration file: %s", m.IngressFile)) + return nil, err + } + + return m.BasedOnCR(instance, ingress) +} + +func (m *Manager) BasedOnCR(instance v1.Object, ingress *networkingv1.Ingress) (*networkingv1.Ingress, error) { + if m.OverrideFunc != nil { + err := m.OverrideFunc(instance, ingress, resources.Create) + if err != nil { + return nil, errors.Wrap(err, "failed during ingress override") + } + } + + ingress.Name = instance.GetName() + if m.Suffix != "" { + ingress.Name = fmt.Sprintf("%s-%s", instance.GetName(), m.Suffix) + } + + ingress.Namespace = instance.GetNamespace() + ingress.Labels = m.LabelsFunc(instance) + + return ingress, nil +} + +func (m *Manager) CheckState(instance v1.Object) error { + // NO-OP + return nil +} + +func (m *Manager) RestoreState(instance v1.Object) error { + // NO-OP + return nil +} + +func (m *Manager) SetCustomName(name string) { + // NO-OP +} + +func (m *Manager) UpdateIngressClassName(name string, instance v1.Object) error { + ingress := &networkingv1.Ingress{} + + // We have to wait for ingress to be available + // as it fails if this function is called immediately after creation + log.Info("Waiting for ingress resource to be ready", "ingress", name) + + ingressPollTimeout := 10 * time.Second + + if pollTime := os.Getenv("INGRESS_RESOURCE_POLL_TIMEOUT"); pollTime != "" { + d, err := time.ParseDuration(pollTime) + if err != nil { + return err + } + + ingressPollTimeout = d + } + + var errGet error + err := wait.Poll(500*time.Millisecond, ingressPollTimeout, func() (bool, error) { + errGet = m.Client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: instance.GetNamespace()}, ingress) + if errGet != nil { + return false, nil + } + return true, nil + }) + + if err != nil { + return err + } + + ingressClass := ingress.ObjectMeta.Annotations["kubernetes.io/ingress.class"] + if ingressClass != "" { + ingress.Spec.IngressClassName = &ingressClass + } + + log.Info("Updating ingress classname in the ingress resource spec", "ingress", name, 
"ingressClassName", ingressClass) + err = m.Client.Update(context.TODO(), ingress, k8sclient.UpdateOption{Owner: instance, Scheme: m.Scheme}) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/manager/resources/ingress/manager_test.go b/pkg/manager/resources/ingress/manager_test.go new file mode 100644 index 00000000..abf622b3 --- /dev/null +++ b/pkg/manager/resources/ingress/manager_test.go @@ -0,0 +1,146 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package ingress_test + +import ( + "context" + + "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + ingress "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/ingress" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/pkg/errors" + networkingv1 "k8s.io/api/networking/v1" + k8serror "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = Describe("Ingress manager", func() { + var ( + mockKubeClient *mocks.Client + manager *ingress.Manager + instance metav1.Object + ) + + BeforeEach(func() { + mockKubeClient = &mocks.Client{} + + instance = &metav1.ObjectMeta{} + + manager = &ingress.Manager{ + IngressFile: "../../../../definitions/ca/ingress.yaml", + Client: mockKubeClient, + OverrideFunc: func(object v1.Object, ingress *networkingv1.Ingress, action resources.Action) error { + return nil + }, + LabelsFunc: func(v1.Object) map[string]string { + return map[string]string{} + }, + } + + instance = &metav1.ObjectMeta{} + }) + + Context("reconciles the ingress instance", func() { + It("does not try to create ingress if the get request returns an error other than 'not found'", func() { + errMsg := "connection refused" + mockKubeClient.GetReturns(errors.New(errMsg)) + err := manager.Reconcile(instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(errMsg)) + }) + + When("ingress does not exist", func() { + BeforeEach(func() { + notFoundErr := &k8serror.StatusError{ + ErrStatus: metav1.Status{ + Reason: metav1.StatusReasonNotFound, + }, + } + mockKubeClient.GetReturns(notFoundErr) + }) + + It("returns an error if fails to load default config", func() { + manager.IngressFile = "bad.yaml" + err := manager.Reconcile(instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("no such file or directory")) + }) + + It("returns an error if override ingress value fails", func() { + manager.OverrideFunc = func(v1.Object, *networkingv1.Ingress, resources.Action) error { + return errors.New("creation override failed") + } + err := manager.Reconcile(instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("creation override failed")) 
+			})
+
+			It("returns an error if the creation of the Ingress fails", func() {
+				errMsg := "unable to create ingress"
+				mockKubeClient.CreateReturns(errors.New(errMsg))
+				err := manager.Reconcile(instance, false)
+				Expect(err).To(HaveOccurred())
+				Expect(err.Error()).To(Equal(errMsg))
+			})
+
+			It("does not return an error on a successful ingress creation", func() {
+				ing := networkingv1.Ingress{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      instance.GetName(),
+						Namespace: instance.GetNamespace(),
+						Annotations: map[string]string{
+							"test": "test value",
+						},
+					},
+				}
+
+				count := 0
+				mockKubeClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj client.Object) error {
+
+					switch obj.(type) {
+					case *networkingv1.Ingress:
+						if count == 0 {
+							// Send not found the first time to go to creation path
+							notFoundErr := &k8serror.StatusError{
+								ErrStatus: metav1.Status{
+									Reason: metav1.StatusReasonNotFound,
+								},
+							}
+							count++
+							return notFoundErr
+						}
+
+						i := obj.(*networkingv1.Ingress)
+						i.ObjectMeta = ing.ObjectMeta
+					}
+
+					return nil
+				}
+
+				err := manager.Reconcile(instance, false)
+				Expect(err).NotTo(HaveOccurred())
+			})
+		})
+	})
+})
diff --git a/pkg/manager/resources/ingressv1beta1/ingress_suite_test.go b/pkg/manager/resources/ingressv1beta1/ingress_suite_test.go
new file mode 100644
index 00000000..e5cfa4fb
--- /dev/null
+++ b/pkg/manager/resources/ingressv1beta1/ingress_suite_test.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright contributors to the Hyperledger Fabric Operator project
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package ingressv1beta1_test
+
+import (
+	"testing"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+)
+
+func TestDeployment(t *testing.T) {
+	RegisterFailHandler(Fail)
+	RunSpecs(t, "Deployment Suite")
+}
diff --git a/pkg/manager/resources/ingressv1beta1/manager.go b/pkg/manager/resources/ingressv1beta1/manager.go
new file mode 100644
index 00000000..2bc5a52d
--- /dev/null
+++ b/pkg/manager/resources/ingressv1beta1/manager.go
@@ -0,0 +1,264 @@
+/*
+ * Copyright contributors to the Hyperledger Fabric Operator project
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package ingressv1beta1
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"time"
+
+	"github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources"
+
+	k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient"
+	"github.com/IBM-Blockchain/fabric-operator/pkg/util"
+	"github.com/pkg/errors"
+	networkingv1beta1 "k8s.io/api/networking/v1beta1"
+	k8serrors "k8s.io/apimachinery/pkg/api/errors"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	logf "sigs.k8s.io/controller-runtime/pkg/log"
+)
+
+var log = logf.Log.WithName("ingress_manager")
+
+type Manager struct {
+	Client      k8sclient.Client
+	Scheme      *runtime.Scheme
+	IngressFile string
+	Suffix      string
+
+	LabelsFunc   func(v1.Object) map[string]string
+	OverrideFunc func(v1.Object, *networkingv1beta1.Ingress, resources.Action) error
+
+	routeName string
+	Name      string
+}
+
+func (m *Manager) Reconcile(instance v1.Object, update bool) error {
+	name := instance.GetName()
+	if m.Suffix != "" {
+		name = fmt.Sprintf("%s-%s", instance.GetName(), m.Suffix)
+	}
+	ingress := &networkingv1beta1.Ingress{}
+	err := m.Client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: instance.GetNamespace()}, ingress)
+	if err != nil {
+		if k8serrors.IsNotFound(err) {
+			log.Info(fmt.Sprintf("Creating ingressv1beta1 '%s'", name))
+			ingress, err := m.GetIngressBasedOnCRFromFile(instance)
+			if err != nil {
+				return err
+			}
+			err = m.Client.Create(context.TODO(), ingress, k8sclient.CreateOption{Owner: instance, Scheme: m.Scheme})
+			if err != nil {
+				return err
+			}
+
+			err = m.UpdateIngressClassName(name, instance)
+			if err != nil {
+				log.Error(err, "Error updating ingress class name")
+				return err
+			}
+
+			return nil
+		}
+		return err
+	}
+
+	if update {
+		if m.OverrideFunc != nil {
+			log.Info(fmt.Sprintf("Updating ingressv1beta1 '%s'", name))
+			err := m.OverrideFunc(instance, ingress, resources.Update)
+			if err != nil {
+				return err
+			}
+
+			err = m.Client.Update(context.TODO(), ingress, k8sclient.UpdateOption{Owner: instance, Scheme: m.Scheme})
+			if err != nil {
+				return err
+			}
+
+			err = m.UpdateIngressClassName(name, instance)
+			if err != nil {
+				log.Error(err, "Error updating ingress class name")
+				return err
+			}
+
+			return nil
+		}
+	}
+
+	// TODO: If needed, update logic for the service goes here
+
+	return nil
+}
+
+func (m *Manager) Exists(instance v1.Object) bool {
+	if instance == nil {
+		return false // Instance has not been reconciled yet
+	}
+
+	name := instance.GetName()
+	if m.Suffix != "" {
+		name = fmt.Sprintf("%s-%s", instance.GetName(), m.Suffix)
+	}
+
+	ingress := &networkingv1beta1.Ingress{}
+	err := m.Client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: instance.GetNamespace()}, ingress)
+	if err != nil {
+		return false
+	}
+
+	return true
+}
+
+func (m *Manager) Delete(instance v1.Object) error {
+	ingress, err := m.Get(instance)
+	if err != nil {
+		if !k8serrors.IsNotFound(err) {
+			return err
+		}
+	}
+
+	if ingress == nil {
+		return nil
+	}
+
+	err = m.Client.Delete(context.TODO(), ingress)
+	if err != nil {
+		if !k8serrors.IsNotFound(err) {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (m *Manager) Get(instance v1.Object) (client.Object, error) {
+	if instance == nil {
+		return nil, nil // Instance has not been reconciled yet
+	}
+
+	name := m.GetName(instance)
+	ingress := &networkingv1beta1.Ingress{}
+	err := m.Client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: instance.GetNamespace()}, ingress)
+	if err != nil {
+		return nil, err
+	}
+
+	return ingress, nil
+}
+
+func (m *Manager) GetName(instance v1.Object) string {
+	if m.Name != "" {
+		return fmt.Sprintf("%s-%s", instance.GetName(), m.Name)
+	}
+	return instance.GetName()
+}
+
+func (m *Manager) GetIngressBasedOnCRFromFile(instance v1.Object) (*networkingv1beta1.Ingress, error) {
+	ingress, err := util.GetIngressv1beta1FromFile(m.IngressFile)
+	if err != nil {
+		log.Error(err, fmt.Sprintf("Error reading ingressv1beta1 configuration file: %s", m.IngressFile))
+		return nil, err
+	}
+
+	return m.BasedOnCR(instance, ingress)
+}
+
+func (m *Manager) BasedOnCR(instance v1.Object, ingress *networkingv1beta1.Ingress) (*networkingv1beta1.Ingress, error) {
+	if m.OverrideFunc != nil {
+		err := m.OverrideFunc(instance, ingress, resources.Create)
+		if err != nil {
+			return nil, errors.Wrap(err, "failed during ingressv1beta1 override")
+		}
+	}
+
+	ingress.Name = instance.GetName()
+	if m.Suffix != "" {
+		ingress.Name = fmt.Sprintf("%s-%s", instance.GetName(), m.Suffix)
+	}
+
+	ingress.Namespace = instance.GetNamespace()
+	ingress.Labels = m.LabelsFunc(instance)
+
+	return ingress, nil
+}
+
+func (m *Manager) CheckState(instance v1.Object) error {
+	// NO-OP
+	return nil
+}
+
+func (m *Manager) RestoreState(instance v1.Object) error {
+	// NO-OP
+	return nil
+}
+
+func (m *Manager) SetCustomName(name string) {
+	// NO-OP
+}
+
+func (m *Manager) UpdateIngressClassName(name string, instance v1.Object) error {
+	ingress := &networkingv1beta1.Ingress{}
+
+	// We have to wait for ingress to be available
+	// as it fails if this function is called immediately after creation
+	log.Info("Waiting for ingressv1beta1 resource to be ready", "ingress", name)
+
+	ingressPollTimeout := 10 * time.Second
+
+	if pollTime := os.Getenv("INGRESS_RESOURCE_POLL_TIMEOUT"); pollTime != "" {
+		d, err := time.ParseDuration(pollTime)
+		if err != nil {
+			return err
+		}
+
+		ingressPollTimeout = d
+	}
+
+	var errGet error
+	err := wait.Poll(500*time.Millisecond, ingressPollTimeout, func() (bool, error) {
+		errGet = m.Client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: instance.GetNamespace()}, ingress)
+		if errGet != nil {
+			return false, nil
+		}
+		return true, nil
+	})
+
+	if err != nil {
+		return err
+	}
+
+	ingressClass := ingress.ObjectMeta.Annotations["kubernetes.io/ingress.class"]
+	if ingressClass != "" {
+		ingress.Spec.IngressClassName = &ingressClass
+	}
+
+	log.Info("Updating ingress classname in the ingress resource spec", "ingress", name, "ingressClassName", ingressClass)
+	err = m.Client.Update(context.TODO(), ingress, k8sclient.UpdateOption{Owner: instance, Scheme: m.Scheme})
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/pkg/manager/resources/ingressv1beta1/manager_test.go b/pkg/manager/resources/ingressv1beta1/manager_test.go
new file mode 100644
index 00000000..93643032
--- /dev/null
+++ b/pkg/manager/resources/ingressv1beta1/manager_test.go
@@ -0,0 +1,146 @@
+/*
+ * Copyright contributors to the Hyperledger Fabric Operator project
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package ingressv1beta1_test
+
+import (
+	"context"
+
+	"github.com/IBM-Blockchain/fabric-operator/controllers/mocks"
+	"github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources"
+	ingressv1beta1 "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/ingressv1beta1"
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	"github.com/pkg/errors"
+	networkingv1beta1 "k8s.io/api/networking/v1beta1"
+	k8serror "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+var _ = Describe("Ingress manager", func() {
+	var (
+		mockKubeClient *mocks.Client
+		manager        *ingressv1beta1.Manager
+		instance       metav1.Object
+	)
+
+	BeforeEach(func() {
+		mockKubeClient = &mocks.Client{}
+
+		instance = &metav1.ObjectMeta{}
+
+		manager = &ingressv1beta1.Manager{
+			IngressFile: "../../../../definitions/ca/ingressv1beta1.yaml",
+			Client:      mockKubeClient,
+			OverrideFunc: func(object v1.Object, ingress *networkingv1beta1.Ingress, action resources.Action) error {
+				return nil
+			},
+			LabelsFunc: func(v1.Object) map[string]string {
+				return map[string]string{}
+			},
+		}
+
+		instance = &metav1.ObjectMeta{}
+	})
+
+	Context("reconciles the ingress instance", func() {
+		It("does not try to create ingress if the get request returns an error other than 'not found'", func() {
+			errMsg := "connection refused"
+			mockKubeClient.GetReturns(errors.New(errMsg))
+			err := manager.Reconcile(instance, false)
+			Expect(err).To(HaveOccurred())
+			Expect(err.Error()).To(Equal(errMsg))
+		})
+
+		When("ingress does not exist", func() {
+			BeforeEach(func() {
+				notFoundErr := &k8serror.StatusError{
+					ErrStatus: metav1.Status{
+						Reason: metav1.StatusReasonNotFound,
+					},
+				}
+				mockKubeClient.GetReturns(notFoundErr)
+			})
+
+			It("returns an error if fails to load default config", func() {
+				manager.IngressFile = "bad.yaml"
+				err := manager.Reconcile(instance, false)
+				Expect(err).To(HaveOccurred())
+				Expect(err.Error()).To(ContainSubstring("no such file or directory"))
+			})
+
+			It("returns an error if override ingress value fails", func() {
+				manager.OverrideFunc = func(v1.Object, *networkingv1beta1.Ingress, resources.Action) error {
+					return errors.New("creation override failed")
+				}
+				err := manager.Reconcile(instance, false)
+				Expect(err).To(HaveOccurred())
+				Expect(err.Error()).Should(ContainSubstring("creation override failed"))
+			})
+
+			It("returns an error if the creation of the Ingress fails", func() {
+				errMsg := "unable to create ingress"
+				mockKubeClient.CreateReturns(errors.New(errMsg))
+				err := manager.Reconcile(instance, false)
+				Expect(err).To(HaveOccurred())
+				Expect(err.Error()).To(Equal(errMsg))
+			})
+
+			It("does not return an error on a successful ingress creation", func() {
+				ing := networkingv1beta1.Ingress{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      instance.GetName(),
+						Namespace: instance.GetNamespace(),
+						Annotations: map[string]string{
+							"test": "test value",
+						},
+					},
+				}
+
+				count := 0
+				mockKubeClient.GetStub = func(ctx
context.Context, types types.NamespacedName, obj client.Object) error { + + switch obj.(type) { + case *networkingv1beta1.Ingress: + if count == 0 { + // Send not found the first time to go to creation path + notFoundErr := &k8serror.StatusError{ + ErrStatus: metav1.Status{ + Reason: metav1.StatusReasonNotFound, + }, + } + count++ + return notFoundErr + } + + i := obj.(*networkingv1beta1.Ingress) + i.ObjectMeta = ing.ObjectMeta + } + + return nil + } + + err := manager.Reconcile(instance, false) + Expect(err).NotTo(HaveOccurred()) + }) + }) + }) +}) diff --git a/pkg/manager/resources/job/job.go b/pkg/manager/resources/job/job.go new file mode 100644 index 00000000..fa33b3d4 --- /dev/null +++ b/pkg/manager/resources/job/job.go @@ -0,0 +1,366 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package job + +import ( + "context" + "crypto/rand" + "fmt" + "math/big" + "time" + + "github.com/pkg/errors" + + controller "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/container" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + + v1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + + k8sclient "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +type Status string + +const ( + FAILED Status = "failed" + COMPLETED Status = "completed" + UNKNOWN Status = "unknown" +) + +var log = logf.Log.WithName("job_resource") + +type Timeouts struct { + WaitUntilActive, WaitUntilFinished time.Duration +} + +func jobIDGenerator() string { + charset := "0123456789abcdefghijklmnopqrstuvwxyz" + + randString1 := make([]byte, 10) + for i := range randString1 { + num, _ := rand.Int(rand.Reader, big.NewInt(int64(len(charset)))) + randString1[i] = charset[num.Int64()] + } + + randString2 := make([]byte, 5) + for i := range randString2 { + num, _ := rand.Int(rand.Reader, big.NewInt(int64(len(charset)))) + randString2[i] = charset[num.Int64()] + } + + return string(randString1) + "-" + string(randString2) +} + +func New(job *v1.Job, timeouts *Timeouts) *Job { + if job != nil { + job.Name = fmt.Sprintf("%s-%s", job.GetName(), jobIDGenerator()) + } + + return &Job{ + Job: job, + Timeouts: timeouts, + } +} + +func NewWithDefaults(job *v1.Job) *Job { + if job != nil { + job.Name = fmt.Sprintf("%s-%s", job.GetName(), jobIDGenerator()) + } + + return &Job{ + Job: job, + Timeouts: &Timeouts{ + WaitUntilActive: 60 * time.Second, + WaitUntilFinished: 60 * time.Second, + }, + } +} + +func NewWithDefaultsUseExistingName(job *v1.Job) *Job { + return &Job{ + Job: job, + Timeouts: &Timeouts{ + WaitUntilActive: 60 * time.Second, + WaitUntilFinished: 60 * time.Second, + }, + } +} + +type Job struct { + *v1.Job + + Timeouts *Timeouts +} + +func (j *Job) 
MustGetContainer(name string) container.Container { + cont, _ := j.GetContainer(name) + return cont +} + +func (j *Job) GetContainer(name string) (cont container.Container, err error) { + for i, c := range j.Spec.Template.Spec.Containers { + if c.Name == name { + cont = container.Container{Container: &j.Spec.Template.Spec.Containers[i]} + return + } + } + for i, c := range j.Spec.Template.Spec.InitContainers { + if c.Name == name { + cont = container.Container{Container: &j.Spec.Template.Spec.InitContainers[i]} + return + } + } + return cont, fmt.Errorf("container '%s' not found", name) +} + +func (j *Job) AddContainer(add container.Container) { + j.Spec.Template.Spec.Containers = util.AppendContainerIfMissing(j.Spec.Template.Spec.Containers, *add.Container) +} + +func (j *Job) AddInitContainer(add container.Container) { + j.Spec.Template.Spec.InitContainers = util.AppendContainerIfMissing(j.Spec.Template.Spec.InitContainers, *add.Container) +} + +func (j *Job) AppendVolumeIfMissing(volume corev1.Volume) { + j.Spec.Template.Spec.Volumes = util.AppendVolumeIfMissing(j.Spec.Template.Spec.Volumes, volume) +} + +func (j *Job) AppendPullSecret(imagePullSecret corev1.LocalObjectReference) { + j.Spec.Template.Spec.ImagePullSecrets = util.AppendImagePullSecretIfMissing(j.Spec.Template.Spec.ImagePullSecrets, imagePullSecret) +} + +// UpdateSecurityContextForAllContainers updates the security context for all containers defined +// in the job +func (j *Job) UpdateSecurityContextForAllContainers(sc container.SecurityContext) { + for i := range j.Spec.Template.Spec.InitContainers { + container.UpdateSecurityContext(&j.Spec.Template.Spec.InitContainers[i], sc) + } + + for i := range j.Spec.Template.Spec.Containers { + container.UpdateSecurityContext(&j.Spec.Template.Spec.Containers[i], sc) + } +} + +func (j *Job) Delete(client controller.Client) error { + if err := client.Delete(context.TODO(), j.Job); err != nil { + return errors.Wrap(err, "failed to delete") + } + + // TODO: Need to investigate why job is not adding controller reference to job pod, + // this manual cleanup should not be required after deleting job + podList := &corev1.PodList{} + if err := client.List(context.TODO(), podList, k8sclient.MatchingLabels{"job-name": j.GetName()}); err != nil { + return errors.Wrap(err, "failed to list job pods") + } + + for _, pod := range podList.Items { + podListItem := pod + if err := client.Delete(context.TODO(), &podListItem); err != nil { + return errors.Wrapf(err, "failed to delete pod '%s'", podListItem.Name) + } + } + + return nil +} + +func (j *Job) Status(client controller.Client) (Status, error) { + k8sJob, err := j.get(client) + if err != nil { + return UNKNOWN, err + } + + if k8sJob.Status.Failed >= int32(1) { + return FAILED, nil + } + + pods, err := j.getPods(client) + if err != nil { + return UNKNOWN, err + } + + for _, pod := range pods.Items { + if pod.Status.Phase != corev1.PodSucceeded { + return FAILED, nil + } + } + + return COMPLETED, nil +} + +func (j *Job) ContainerStatus(client controller.Client, contName string) (Status, error) { + pods, err := j.getPods(client) + if err != nil { + return UNKNOWN, err + } + + for _, pod := range pods.Items { + for _, containerStatus := range pod.Status.ContainerStatuses { + if containerStatus.Name == contName { + if containerStatus.State.Terminated != nil { + if containerStatus.State.Terminated.ExitCode == int32(0) { + return COMPLETED, nil + } + return FAILED, nil + } + } + } + } + + return UNKNOWN, nil +} + +func (j *Job) 
WaitUntilActive(client controller.Client) error { + err := wait.Poll(500*time.Millisecond, j.Timeouts.WaitUntilActive, func() (bool, error) { + log.Info(fmt.Sprintf("Waiting for job '%s' to start in namespace '%s'", j.GetName(), j.GetNamespace())) + + k8sJob, err := j.get(client) + if err != nil { + return false, err + } + + if k8sJob.Status.Active >= int32(1) || k8sJob.Status.Succeeded >= int32(1) { + return true, nil + } + + return false, nil + }) + if err != nil { + return errors.Wrap(err, "job failed to start") + } + return nil +} + +func (j *Job) WaitUntilFinished(client controller.Client) error { + var err error + + err = wait.Poll(2*time.Second, j.Timeouts.WaitUntilFinished, func() (bool, error) { + log.Info(fmt.Sprintf("Waiting for job pod '%s' to finish", j.GetName())) + + pods, err := j.getPods(client) + if err != nil { + log.Info(fmt.Sprintf("get job pod err: %s", err)) + return false, nil + } + + if len(pods.Items) == 0 { + return false, nil + } + + return j.podsTerminated(pods), nil + }) + if err != nil { + return errors.Wrapf(err, "pod for job '%s' failed to finish", j.GetName()) + } + + return nil +} + +func (j *Job) podsTerminated(pods *corev1.PodList) bool { + for _, pod := range pods.Items { + for _, containerStatus := range pod.Status.ContainerStatuses { + if containerStatus.State.Terminated == nil { + return false + } + } + } + + return true +} + +func (j *Job) WaitUntilContainerFinished(client controller.Client, contName string) error { + var err error + + err = wait.Poll(2*time.Second, j.Timeouts.WaitUntilFinished, func() (bool, error) { + log.Info(fmt.Sprintf("Waiting for job pod '%s' to finish", j.GetName())) + + pods, err := j.getPods(client) + if err != nil { + log.Info(fmt.Sprintf("get job pod err: %s", err)) + return false, nil + } + + if len(pods.Items) == 0 { + return false, nil + } + + return j.containerTerminated(pods, contName), nil + }) + if err != nil { + return errors.Wrapf(err, "pod for job '%s' failed to finish", j.GetName()) + } + + return nil +} + +func (j *Job) ContainerFinished(client controller.Client, contName string) (bool, error) { + pods, err := j.getPods(client) + if err != nil { + return false, err + } + + return j.containerTerminated(pods, contName), nil +} + +func (j *Job) containerTerminated(pods *corev1.PodList, contName string) bool { + for _, pod := range pods.Items { + for _, containerStatus := range pod.Status.ContainerStatuses { + if containerStatus.Name == contName { + if containerStatus.State.Terminated == nil { + return false + } + } + } + } + + return true +} + +func (j *Job) getPods(client controller.Client) (*corev1.PodList, error) { + labelSelector, err := labels.Parse(fmt.Sprintf("job-name=%s", j.GetName())) + if err != nil { + return nil, err + } + + opts := &k8sclient.ListOptions{ + LabelSelector: labelSelector, + } + + pods := &corev1.PodList{} + if err := client.List(context.TODO(), pods, opts); err != nil { + return nil, err + } + + return pods, nil +} + +func (j *Job) get(client controller.Client) (*v1.Job, error) { + k8sJob := &v1.Job{} + err := client.Get(context.TODO(), types.NamespacedName{Name: j.GetName(), Namespace: j.GetNamespace()}, k8sJob) + if err != nil { + return nil, err + } + + return k8sJob, nil +} diff --git a/pkg/manager/resources/job/job_suite_test.go b/pkg/manager/resources/job/job_suite_test.go new file mode 100644 index 00000000..f8a8b7f8 --- /dev/null +++ b/pkg/manager/resources/job/job_suite_test.go @@ -0,0 +1,33 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + 
* SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package job_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestJob(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Job Suite") +} + +//go:generate counterfeiter -o mocks/client.go -fake-name Client ../../../k8s/controllerclient Client diff --git a/pkg/manager/resources/job/job_test.go b/pkg/manager/resources/job/job_test.go new file mode 100644 index 00000000..bd472960 --- /dev/null +++ b/pkg/manager/resources/job/job_test.go @@ -0,0 +1,265 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package job_test + +import ( + "context" + "errors" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/container" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/job" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/job/mocks" + + v1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + k8sclient "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = Describe("Job", func() { + var ( + k8sJob *v1.Job + testJob *job.Job + ) + + BeforeEach(func() { + k8sJob = &v1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: "k8sJob", + Namespace: "default", + }, + } + testJob = &job.Job{ + Job: k8sJob, + } + }) + + It("creates job with defaults", func() { + testJob = job.NewWithDefaults(k8sJob) + Expect(testJob.Timeouts).To(Equal(&job.Timeouts{ + WaitUntilActive: 60 * time.Second, + WaitUntilFinished: 60 * time.Second, + })) + + By("adding unique id to job name", func() { + Expect(testJob.Name).To(ContainSubstring("k8sJob-")) + }) + }) + + It("adds container", func() { + cont := container.Container{ + Container: &corev1.Container{ + Name: "test-cont", + }, + } + + testJob.AddContainer(cont) + Expect(len(testJob.Spec.Template.Spec.Containers)).To(Equal(1)) + Expect(testJob.Spec.Template.Spec.Containers[0]).To(Equal(*cont.Container)) + }) + + Context("volumes", func() { + BeforeEach(func() { + testJob.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "test-volume", + }, + } + }) + + It("appends volume if missing", func() { + testJob.AppendVolumeIfMissing(corev1.Volume{Name: "test-volume"}) + testJob.AppendVolumeIfMissing(corev1.Volume{Name: "test-volume2"}) + + Expect(len(testJob.Spec.Template.Spec.Volumes)).To(Equal(2)) + Expect(testJob.Spec.Template.Spec.Volumes[1]).To(Equal(corev1.Volume{Name: "test-volume2"})) + }) + }) + + Context("image pull secrets", func() { + BeforeEach(func() { + testJob.Spec.Template.Spec.ImagePullSecrets = []corev1.LocalObjectReference{ + { + Name: "pullsecret", + }, + } + }) + + It("appends volume if missing", func() { + testJob.AppendPullSecret(corev1.LocalObjectReference{Name: "pullsecret"}) + testJob.AppendPullSecret(corev1.LocalObjectReference{Name: "pullsecret2"}) + + Expect(len(testJob.Spec.Template.Spec.ImagePullSecrets)).To(Equal(2)) + Expect(testJob.Spec.Template.Spec.ImagePullSecrets[1]).To( + Equal(corev1.LocalObjectReference{Name: "pullsecret2"}), + ) + }) + }) + + Context("events", func() { + var ( + client *mocks.Client + ) + + BeforeEach(func() { + client = &mocks.Client{} + + }) + + Context("status", func() { + Context("failures", func() { + Context("job", func() { + When("getting job from API server fails", func() { + BeforeEach(func() { + client.GetStub = func(ctx context.Context, nn types.NamespacedName, obj k8sclient.Object) error { + return errors.New("failed to get job") + } + }) + + It("returns error and UNKNOWN status", func() { + status, err := testJob.Status(client) + Expect(err).To(HaveOccurred()) + Expect(status).To(Equal(job.UNKNOWN)) + }) + }) + + When("job has failed", func() { + BeforeEach(func() { + client.GetStub = func(ctx context.Context, nn types.NamespacedName, obj k8sclient.Object) error { + j := obj.(*v1.Job) + j.Status = v1.JobStatus{ + Failed: int32(1), + } + return nil + } + }) + + It("returns FAILED status", func() { + status, err := testJob.Status(client) + Expect(err).NotTo(HaveOccurred()) + Expect(status).To(Equal(job.FAILED)) + }) + }) + }) + + Context("pods", func() { + When("getting pods from API server fails", 
func() { + BeforeEach(func() { + client.ListStub = func(ctx context.Context, list k8sclient.ObjectList, opts ...k8sclient.ListOption) error { + return errors.New("failed to list pods") + } + }) + + It("returns error and UNKNOWN status", func() { + status, err := testJob.Status(client) + Expect(err).To(HaveOccurred()) + Expect(status).To(Equal(job.UNKNOWN)) + }) + }) + + When("job has failed", func() { + BeforeEach(func() { + client.ListStub = func(ctx context.Context, list k8sclient.ObjectList, opts ...k8sclient.ListOption) error { + pods := list.(*corev1.PodList) + pods.Items = []corev1.Pod{ + { + Status: corev1.PodStatus{ + Phase: corev1.PodFailed, + }, + }, + } + return nil + } + }) + + It("returns FAILED status", func() { + status, err := testJob.Status(client) + Expect(err).NotTo(HaveOccurred()) + Expect(status).To(Equal(job.FAILED)) + }) + }) + }) + }) + + It("returns COMPLETED state", func() { + status, err := testJob.Status(client) + Expect(err).NotTo(HaveOccurred()) + Expect(status).To(Equal(job.COMPLETED)) + }) + }) + + Context("wait until active", func() { + BeforeEach(func() { + testJob.Timeouts = &job.Timeouts{ + WaitUntilActive: time.Second, + } + + client.GetStub = func(ctx context.Context, nn types.NamespacedName, obj k8sclient.Object) error { + j := obj.(*v1.Job) + j.Status = v1.JobStatus{ + Active: int32(1), + } + return nil + } + }) + + It("returns before timeout with no errors", func() { + err := testJob.WaitUntilActive(client) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("wait until finished", func() { + BeforeEach(func() { + testJob.Timeouts = &job.Timeouts{ + WaitUntilFinished: time.Second, + } + + client.ListStub = func(ctx context.Context, list k8sclient.ObjectList, opts ...k8sclient.ListOption) error { + pods := list.(*corev1.PodList) + pods.Items = []corev1.Pod{ + { + Status: corev1.PodStatus{ + ContainerStatuses: []corev1.ContainerStatus{ + { + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{}, + }, + }, + }, + }, + }, + } + return nil + } + }) + + It("returns before timeout with no errors", func() { + err := testJob.WaitUntilFinished(client) + Expect(err).NotTo(HaveOccurred()) + }) + }) + }) +}) diff --git a/pkg/manager/resources/job/mocks/client.go b/pkg/manager/resources/job/mocks/client.go new file mode 100644 index 00000000..ee14505d --- /dev/null +++ b/pkg/manager/resources/job/mocks/client.go @@ -0,0 +1,746 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package mocks + +import ( + "context" + "sync" + + "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type Client struct { + CreateStub func(context.Context, client.Object, ...controllerclient.CreateOption) error + createMutex sync.RWMutex + createArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 []controllerclient.CreateOption + } + createReturns struct { + result1 error + } + createReturnsOnCall map[int]struct { + result1 error + } + CreateOrUpdateStub func(context.Context, client.Object, ...controllerclient.CreateOrUpdateOption) error + createOrUpdateMutex sync.RWMutex + createOrUpdateArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 []controllerclient.CreateOrUpdateOption + } + createOrUpdateReturns struct { + result1 error + } + createOrUpdateReturnsOnCall map[int]struct { + result1 error + } + DeleteStub func(context.Context, client.Object, ...client.DeleteOption) error + deleteMutex sync.RWMutex + deleteArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 []client.DeleteOption + } + deleteReturns struct { + result1 error + } + deleteReturnsOnCall map[int]struct { + result1 error + } + GetStub func(context.Context, types.NamespacedName, client.Object) error + getMutex sync.RWMutex + getArgsForCall []struct { + arg1 context.Context + arg2 types.NamespacedName + arg3 client.Object + } + getReturns struct { + result1 error + } + getReturnsOnCall map[int]struct { + result1 error + } + ListStub func(context.Context, client.ObjectList, ...client.ListOption) error + listMutex sync.RWMutex + listArgsForCall []struct { + arg1 context.Context + arg2 client.ObjectList + arg3 []client.ListOption + } + listReturns struct { + result1 error + } + listReturnsOnCall map[int]struct { + result1 error + } + PatchStub func(context.Context, client.Object, client.Patch, ...controllerclient.PatchOption) error + patchMutex sync.RWMutex + patchArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 client.Patch + arg4 []controllerclient.PatchOption + } + patchReturns struct { + result1 error + } + patchReturnsOnCall map[int]struct { + result1 error + } + PatchStatusStub func(context.Context, client.Object, client.Patch, ...controllerclient.PatchOption) error + patchStatusMutex sync.RWMutex + patchStatusArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 client.Patch + arg4 []controllerclient.PatchOption + } + patchStatusReturns struct { + result1 error + } + patchStatusReturnsOnCall map[int]struct { + result1 error + } + UpdateStub func(context.Context, client.Object, ...controllerclient.UpdateOption) error + updateMutex sync.RWMutex + updateArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 []controllerclient.UpdateOption + } + updateReturns struct { + result1 error + } + updateReturnsOnCall map[int]struct { + result1 error + } + UpdateStatusStub func(context.Context, client.Object, ...client.UpdateOption) error + updateStatusMutex sync.RWMutex + updateStatusArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 []client.UpdateOption + } + updateStatusReturns struct { + result1 error + } + updateStatusReturnsOnCall map[int]struct { + result1 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *Client) Create(arg1 context.Context, arg2 client.Object, arg3 ...controllerclient.CreateOption) error { + 
fake.createMutex.Lock() + ret, specificReturn := fake.createReturnsOnCall[len(fake.createArgsForCall)] + fake.createArgsForCall = append(fake.createArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 []controllerclient.CreateOption + }{arg1, arg2, arg3}) + stub := fake.CreateStub + fakeReturns := fake.createReturns + fake.recordInvocation("Create", []interface{}{arg1, arg2, arg3}) + fake.createMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) CreateCallCount() int { + fake.createMutex.RLock() + defer fake.createMutex.RUnlock() + return len(fake.createArgsForCall) +} + +func (fake *Client) CreateCalls(stub func(context.Context, client.Object, ...controllerclient.CreateOption) error) { + fake.createMutex.Lock() + defer fake.createMutex.Unlock() + fake.CreateStub = stub +} + +func (fake *Client) CreateArgsForCall(i int) (context.Context, client.Object, []controllerclient.CreateOption) { + fake.createMutex.RLock() + defer fake.createMutex.RUnlock() + argsForCall := fake.createArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) CreateReturns(result1 error) { + fake.createMutex.Lock() + defer fake.createMutex.Unlock() + fake.CreateStub = nil + fake.createReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) CreateReturnsOnCall(i int, result1 error) { + fake.createMutex.Lock() + defer fake.createMutex.Unlock() + fake.CreateStub = nil + if fake.createReturnsOnCall == nil { + fake.createReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.createReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) CreateOrUpdate(arg1 context.Context, arg2 client.Object, arg3 ...controllerclient.CreateOrUpdateOption) error { + fake.createOrUpdateMutex.Lock() + ret, specificReturn := fake.createOrUpdateReturnsOnCall[len(fake.createOrUpdateArgsForCall)] + fake.createOrUpdateArgsForCall = append(fake.createOrUpdateArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 []controllerclient.CreateOrUpdateOption + }{arg1, arg2, arg3}) + stub := fake.CreateOrUpdateStub + fakeReturns := fake.createOrUpdateReturns + fake.recordInvocation("CreateOrUpdate", []interface{}{arg1, arg2, arg3}) + fake.createOrUpdateMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) CreateOrUpdateCallCount() int { + fake.createOrUpdateMutex.RLock() + defer fake.createOrUpdateMutex.RUnlock() + return len(fake.createOrUpdateArgsForCall) +} + +func (fake *Client) CreateOrUpdateCalls(stub func(context.Context, client.Object, ...controllerclient.CreateOrUpdateOption) error) { + fake.createOrUpdateMutex.Lock() + defer fake.createOrUpdateMutex.Unlock() + fake.CreateOrUpdateStub = stub +} + +func (fake *Client) CreateOrUpdateArgsForCall(i int) (context.Context, client.Object, []controllerclient.CreateOrUpdateOption) { + fake.createOrUpdateMutex.RLock() + defer fake.createOrUpdateMutex.RUnlock() + argsForCall := fake.createOrUpdateArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) CreateOrUpdateReturns(result1 error) { + fake.createOrUpdateMutex.Lock() + defer fake.createOrUpdateMutex.Unlock() + fake.CreateOrUpdateStub = nil + fake.createOrUpdateReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) CreateOrUpdateReturnsOnCall(i int, result1 error) { + fake.createOrUpdateMutex.Lock() + defer fake.createOrUpdateMutex.Unlock() + fake.CreateOrUpdateStub = nil + if fake.createOrUpdateReturnsOnCall == nil { + fake.createOrUpdateReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.createOrUpdateReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) Delete(arg1 context.Context, arg2 client.Object, arg3 ...client.DeleteOption) error { + fake.deleteMutex.Lock() + ret, specificReturn := fake.deleteReturnsOnCall[len(fake.deleteArgsForCall)] + fake.deleteArgsForCall = append(fake.deleteArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 []client.DeleteOption + }{arg1, arg2, arg3}) + stub := fake.DeleteStub + fakeReturns := fake.deleteReturns + fake.recordInvocation("Delete", []interface{}{arg1, arg2, arg3}) + fake.deleteMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) DeleteCallCount() int { + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + return len(fake.deleteArgsForCall) +} + +func (fake *Client) DeleteCalls(stub func(context.Context, client.Object, ...client.DeleteOption) error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = stub +} + +func (fake *Client) DeleteArgsForCall(i int) (context.Context, client.Object, []client.DeleteOption) { + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + argsForCall := fake.deleteArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) DeleteReturns(result1 error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = nil + fake.deleteReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) DeleteReturnsOnCall(i int, result1 error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = nil + if fake.deleteReturnsOnCall == nil { + fake.deleteReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.deleteReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) Get(arg1 context.Context, arg2 types.NamespacedName, arg3 client.Object) error { + fake.getMutex.Lock() + ret, specificReturn := fake.getReturnsOnCall[len(fake.getArgsForCall)] + fake.getArgsForCall = append(fake.getArgsForCall, struct { + arg1 context.Context + arg2 types.NamespacedName + arg3 client.Object + }{arg1, arg2, arg3}) + stub := fake.GetStub + fakeReturns := fake.getReturns + fake.recordInvocation("Get", []interface{}{arg1, arg2, arg3}) + fake.getMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) GetCallCount() int { + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + return len(fake.getArgsForCall) +} + +func (fake *Client) GetCalls(stub func(context.Context, types.NamespacedName, client.Object) error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = stub +} + +func (fake *Client) GetArgsForCall(i int) (context.Context, types.NamespacedName, client.Object) { + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + argsForCall := fake.getArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) GetReturns(result1 error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = nil + fake.getReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) GetReturnsOnCall(i int, result1 error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = nil + if fake.getReturnsOnCall == nil { + fake.getReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.getReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) List(arg1 context.Context, arg2 client.ObjectList, arg3 ...client.ListOption) error { + fake.listMutex.Lock() + ret, specificReturn := fake.listReturnsOnCall[len(fake.listArgsForCall)] + fake.listArgsForCall = append(fake.listArgsForCall, struct { + arg1 context.Context + arg2 client.ObjectList + arg3 []client.ListOption + }{arg1, arg2, arg3}) + stub := fake.ListStub + fakeReturns := fake.listReturns + fake.recordInvocation("List", []interface{}{arg1, arg2, arg3}) + fake.listMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) ListCallCount() int { + fake.listMutex.RLock() + defer fake.listMutex.RUnlock() + return len(fake.listArgsForCall) +} + +func (fake *Client) ListCalls(stub func(context.Context, client.ObjectList, ...client.ListOption) error) { + fake.listMutex.Lock() + defer fake.listMutex.Unlock() + fake.ListStub = stub +} + +func (fake *Client) ListArgsForCall(i int) (context.Context, client.ObjectList, []client.ListOption) { + fake.listMutex.RLock() + defer fake.listMutex.RUnlock() + argsForCall := fake.listArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) ListReturns(result1 error) { + fake.listMutex.Lock() + defer fake.listMutex.Unlock() + fake.ListStub = nil + fake.listReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) ListReturnsOnCall(i int, result1 error) { + fake.listMutex.Lock() + defer fake.listMutex.Unlock() + fake.ListStub = nil + if fake.listReturnsOnCall == nil { + fake.listReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.listReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) Patch(arg1 context.Context, arg2 client.Object, arg3 client.Patch, arg4 ...controllerclient.PatchOption) error { + fake.patchMutex.Lock() + ret, specificReturn := fake.patchReturnsOnCall[len(fake.patchArgsForCall)] + fake.patchArgsForCall = append(fake.patchArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 client.Patch + arg4 []controllerclient.PatchOption + }{arg1, arg2, arg3, arg4}) + stub := fake.PatchStub + fakeReturns := fake.patchReturns + fake.recordInvocation("Patch", []interface{}{arg1, arg2, arg3, arg4}) + fake.patchMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3, arg4...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) PatchCallCount() int { + fake.patchMutex.RLock() + defer fake.patchMutex.RUnlock() + return len(fake.patchArgsForCall) +} + +func (fake *Client) PatchCalls(stub func(context.Context, client.Object, client.Patch, ...controllerclient.PatchOption) error) { + fake.patchMutex.Lock() + defer fake.patchMutex.Unlock() + fake.PatchStub = stub +} + +func (fake *Client) PatchArgsForCall(i int) (context.Context, client.Object, client.Patch, []controllerclient.PatchOption) { + fake.patchMutex.RLock() + defer fake.patchMutex.RUnlock() + argsForCall := fake.patchArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4 +} + +func (fake *Client) PatchReturns(result1 error) { + fake.patchMutex.Lock() + defer fake.patchMutex.Unlock() + fake.PatchStub = nil + fake.patchReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) PatchReturnsOnCall(i int, result1 error) { + fake.patchMutex.Lock() + defer fake.patchMutex.Unlock() + fake.PatchStub = nil + if fake.patchReturnsOnCall == nil { + fake.patchReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.patchReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) PatchStatus(arg1 context.Context, arg2 client.Object, arg3 client.Patch, arg4 ...controllerclient.PatchOption) error { + fake.patchStatusMutex.Lock() + ret, specificReturn := fake.patchStatusReturnsOnCall[len(fake.patchStatusArgsForCall)] + fake.patchStatusArgsForCall = append(fake.patchStatusArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 client.Patch + arg4 []controllerclient.PatchOption + }{arg1, arg2, arg3, arg4}) + stub := fake.PatchStatusStub + fakeReturns := fake.patchStatusReturns + fake.recordInvocation("PatchStatus", []interface{}{arg1, arg2, arg3, arg4}) + fake.patchStatusMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3, arg4...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) PatchStatusCallCount() int { + fake.patchStatusMutex.RLock() + defer fake.patchStatusMutex.RUnlock() + return len(fake.patchStatusArgsForCall) +} + +func (fake *Client) PatchStatusCalls(stub func(context.Context, client.Object, client.Patch, ...controllerclient.PatchOption) error) { + fake.patchStatusMutex.Lock() + defer fake.patchStatusMutex.Unlock() + fake.PatchStatusStub = stub +} + +func (fake *Client) PatchStatusArgsForCall(i int) (context.Context, client.Object, client.Patch, []controllerclient.PatchOption) { + fake.patchStatusMutex.RLock() + defer fake.patchStatusMutex.RUnlock() + argsForCall := fake.patchStatusArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4 +} + +func (fake *Client) PatchStatusReturns(result1 error) { + fake.patchStatusMutex.Lock() + defer fake.patchStatusMutex.Unlock() + fake.PatchStatusStub = nil + fake.patchStatusReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) PatchStatusReturnsOnCall(i int, result1 error) { + fake.patchStatusMutex.Lock() + defer fake.patchStatusMutex.Unlock() + fake.PatchStatusStub = nil + if fake.patchStatusReturnsOnCall == nil { + fake.patchStatusReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.patchStatusReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) Update(arg1 context.Context, arg2 client.Object, arg3 ...controllerclient.UpdateOption) error { + fake.updateMutex.Lock() + ret, specificReturn := fake.updateReturnsOnCall[len(fake.updateArgsForCall)] + fake.updateArgsForCall = append(fake.updateArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 []controllerclient.UpdateOption + }{arg1, arg2, arg3}) + stub := fake.UpdateStub + fakeReturns := fake.updateReturns + fake.recordInvocation("Update", []interface{}{arg1, arg2, arg3}) + fake.updateMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) UpdateCallCount() int { + fake.updateMutex.RLock() + defer fake.updateMutex.RUnlock() + return len(fake.updateArgsForCall) +} + +func (fake *Client) UpdateCalls(stub func(context.Context, client.Object, ...controllerclient.UpdateOption) error) { + fake.updateMutex.Lock() + defer fake.updateMutex.Unlock() + fake.UpdateStub = stub +} + +func (fake *Client) UpdateArgsForCall(i int) (context.Context, client.Object, []controllerclient.UpdateOption) { + fake.updateMutex.RLock() + defer fake.updateMutex.RUnlock() + argsForCall := fake.updateArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) UpdateReturns(result1 error) { + fake.updateMutex.Lock() + defer fake.updateMutex.Unlock() + fake.UpdateStub = nil + fake.updateReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) UpdateReturnsOnCall(i int, result1 error) { + fake.updateMutex.Lock() + defer fake.updateMutex.Unlock() + fake.UpdateStub = nil + if fake.updateReturnsOnCall == nil { + fake.updateReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.updateReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) UpdateStatus(arg1 context.Context, arg2 client.Object, arg3 ...client.UpdateOption) error { + fake.updateStatusMutex.Lock() + ret, specificReturn := fake.updateStatusReturnsOnCall[len(fake.updateStatusArgsForCall)] + fake.updateStatusArgsForCall = append(fake.updateStatusArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 []client.UpdateOption + }{arg1, arg2, arg3}) + stub := fake.UpdateStatusStub + fakeReturns := fake.updateStatusReturns + fake.recordInvocation("UpdateStatus", []interface{}{arg1, arg2, arg3}) + fake.updateStatusMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Client) UpdateStatusCallCount() int { + fake.updateStatusMutex.RLock() + defer fake.updateStatusMutex.RUnlock() + return len(fake.updateStatusArgsForCall) +} + +func (fake *Client) UpdateStatusCalls(stub func(context.Context, client.Object, ...client.UpdateOption) error) { + fake.updateStatusMutex.Lock() + defer fake.updateStatusMutex.Unlock() + fake.UpdateStatusStub = stub +} + +func (fake *Client) UpdateStatusArgsForCall(i int) (context.Context, client.Object, []client.UpdateOption) { + fake.updateStatusMutex.RLock() + defer fake.updateStatusMutex.RUnlock() + argsForCall := fake.updateStatusArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Client) UpdateStatusReturns(result1 error) { + fake.updateStatusMutex.Lock() + defer fake.updateStatusMutex.Unlock() + fake.UpdateStatusStub = nil + fake.updateStatusReturns = struct { + result1 error + }{result1} +} + +func (fake *Client) UpdateStatusReturnsOnCall(i int, result1 error) { + fake.updateStatusMutex.Lock() + defer fake.updateStatusMutex.Unlock() + fake.UpdateStatusStub = nil + if fake.updateStatusReturnsOnCall == nil { + fake.updateStatusReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.updateStatusReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Client) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.createMutex.RLock() + defer fake.createMutex.RUnlock() + fake.createOrUpdateMutex.RLock() + defer fake.createOrUpdateMutex.RUnlock() + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + fake.listMutex.RLock() + defer fake.listMutex.RUnlock() + fake.patchMutex.RLock() + defer fake.patchMutex.RUnlock() + fake.patchStatusMutex.RLock() + defer fake.patchStatusMutex.RUnlock() + fake.updateMutex.RLock() + defer fake.updateMutex.RUnlock() + fake.updateStatusMutex.RLock() + defer fake.updateStatusMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *Client) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ controllerclient.Client = new(Client) diff --git a/pkg/manager/resources/manager/manager.go b/pkg/manager/resources/manager/manager.go new file mode 100644 index 00000000..20fb3da9 --- /dev/null +++ b/pkg/manager/resources/manager/manager.go @@ -0,0 +1,188 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package manager + +import ( + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/configmap" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/deployment" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/ingress" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/ingressv1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/orderernode" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/pv" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/pvc" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/role" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/rolebinding" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/route" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/service" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/serviceaccount" + routev1 "github.com/openshift/api/route/v1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + networkingv1beta1 "k8s.io/api/networking/v1beta1" + rbacv1 "k8s.io/api/rbac/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +type Manager struct { + Client k8sclient.Client + Scheme *runtime.Scheme +} + +func New(client k8sclient.Client, scheme *runtime.Scheme) *Manager { + return &Manager{ + Client: client, + Scheme: scheme, + } +} + +func (m *Manager) CreateDeploymentManager(name string, oFunc func(v1.Object, *appsv1.Deployment, resources.Action) error, labelsFunc func(v1.Object) map[string]string, deploymentFile string) *deployment.Manager { + return &deployment.Manager{ + Client: m.Client, + Scheme: m.Scheme, + DeploymentFile: deploymentFile, + LabelsFunc: labelsFunc, + Name: name, + OverrideFunc: oFunc, + } +} + +func (m *Manager) CreateServiceManager(name string, oFunc func(v1.Object, *corev1.Service, resources.Action) error, labelsFunc func(v1.Object) map[string]string, serviceFile string) *service.Manager { + return &service.Manager{ + Client: m.Client, + Scheme: m.Scheme, + ServiceFile: serviceFile, + LabelsFunc: labelsFunc, + Name: name, + OverrideFunc: oFunc, + } +} + +func (m *Manager) CreatePVCManager(name string, oFunc func(v1.Object, *corev1.PersistentVolumeClaim, resources.Action) error, labelsFunc func(v1.Object) map[string]string, pvcFile string) resources.Manager { + return &pvc.Manager{ + Client: m.Client, + Scheme: m.Scheme, + PVCFile: pvcFile, + Name: name, + LabelsFunc: labelsFunc, + OverrideFunc: oFunc, + } +} + +func (m *Manager) CreatePVManager(name string, oFunc func(v1.Object, *corev1.PersistentVolume, resources.Action) error, labelsFunc func(v1.Object) map[string]string) resources.Manager { + return &pv.Manager{ + Client: m.Client, + Scheme: m.Scheme, + Name: name, + LabelsFunc: labelsFunc, + OverrideFunc: oFunc, + } +} + +func (m *Manager) CreateConfigMapManager(name string, oFunc func(v1.Object, *corev1.ConfigMap, resources.Action, map[string]interface{}) error, labelsFunc func(v1.Object) map[string]string, file string, options map[string]interface{}) resources.Manager { + return &configmap.Manager{ + Client: 
m.Client, + Scheme: m.Scheme, + ConfigMapFile: file, + Name: name, + LabelsFunc: labelsFunc, + OverrideFunc: oFunc, + Options: options, + } +} + +func (m *Manager) CreateRoleManager(name string, oFunc func(v1.Object, *rbacv1.Role, resources.Action) error, labelsFunc func(v1.Object) map[string]string, file string) resources.Manager { + return &role.Manager{ + Client: m.Client, + Scheme: m.Scheme, + RoleFile: file, + Name: name, + LabelsFunc: labelsFunc, + OverrideFunc: oFunc, + } +} + +func (m *Manager) CreateRoleBindingManager(name string, oFunc func(v1.Object, *rbacv1.RoleBinding, resources.Action) error, labelsFunc func(v1.Object) map[string]string, file string) resources.Manager { + return &rolebinding.Manager{ + Client: m.Client, + Scheme: m.Scheme, + RoleBindingFile: file, + Name: name, + LabelsFunc: labelsFunc, + OverrideFunc: oFunc, + } +} + +func (m *Manager) CreateServiceAccountManager(name string, oFunc func(v1.Object, *corev1.ServiceAccount, resources.Action) error, labelsFunc func(v1.Object) map[string]string, file string) resources.Manager { + return &serviceaccount.Manager{ + Client: m.Client, + Scheme: m.Scheme, + ServiceAccountFile: file, + Name: name, + LabelsFunc: labelsFunc, + OverrideFunc: oFunc, + } +} + +func (m *Manager) CreateRouteManager(name string, oFunc func(v1.Object, *routev1.Route, resources.Action) error, labelsFunc func(v1.Object) map[string]string, file string) resources.Manager { + return &route.Manager{ + Client: m.Client, + Scheme: m.Scheme, + RouteFile: file, + Name: name, + LabelsFunc: labelsFunc, + OverrideFunc: oFunc, + } +} + +func (m *Manager) CreateIngressManager(suffix string, oFunc func(v1.Object, *networkingv1.Ingress, resources.Action) error, labelsFunc func(v1.Object) map[string]string, file string) resources.Manager { + return &ingress.Manager{ + Client: m.Client, + Scheme: m.Scheme, + IngressFile: file, + Suffix: suffix, + LabelsFunc: labelsFunc, + OverrideFunc: oFunc, + } +} + +func (m *Manager) CreateIngressv1beta1Manager(suffix string, oFunc func(v1.Object, *networkingv1beta1.Ingress, resources.Action) error, labelsFunc func(v1.Object) map[string]string, file string) resources.Manager { + return &ingressv1beta1.Manager{ + Client: m.Client, + Scheme: m.Scheme, + IngressFile: file, + Suffix: suffix, + LabelsFunc: labelsFunc, + OverrideFunc: oFunc, + } +} + +func (m *Manager) CreateOrderernodeManager(suffix string, oFunc func(v1.Object, *current.IBPOrderer, resources.Action) error, labelsFunc func(v1.Object) map[string]string, file string) resources.Manager { + return &orderernode.Manager{ + Client: m.Client, + Scheme: m.Scheme, + OrdererNodeFile: file, + LabelsFunc: labelsFunc, + OverrideFunc: oFunc, + } +} diff --git a/pkg/manager/resources/mocks/resource_manager.go b/pkg/manager/resources/mocks/resource_manager.go new file mode 100644 index 00000000..e09fd941 --- /dev/null +++ b/pkg/manager/resources/mocks/resource_manager.go @@ -0,0 +1,603 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package mocks + +import ( + "sync" + + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type ResourceManager struct { + CheckStateStub func(v1.Object) error + checkStateMutex sync.RWMutex + checkStateArgsForCall []struct { + arg1 v1.Object + } + checkStateReturns struct { + result1 error + } + checkStateReturnsOnCall map[int]struct { + result1 error + } + DeleteStub func(v1.Object) error + deleteMutex sync.RWMutex + deleteArgsForCall []struct { + arg1 v1.Object + } + deleteReturns struct { + result1 error + } + deleteReturnsOnCall map[int]struct { + result1 error + } + ExistsStub func(v1.Object) bool + existsMutex sync.RWMutex + existsArgsForCall []struct { + arg1 v1.Object + } + existsReturns struct { + result1 bool + } + existsReturnsOnCall map[int]struct { + result1 bool + } + GetStub func(v1.Object) (client.Object, error) + getMutex sync.RWMutex + getArgsForCall []struct { + arg1 v1.Object + } + getReturns struct { + result1 client.Object + result2 error + } + getReturnsOnCall map[int]struct { + result1 client.Object + result2 error + } + GetNameStub func(v1.Object) string + getNameMutex sync.RWMutex + getNameArgsForCall []struct { + arg1 v1.Object + } + getNameReturns struct { + result1 string + } + getNameReturnsOnCall map[int]struct { + result1 string + } + ReconcileStub func(v1.Object, bool) error + reconcileMutex sync.RWMutex + reconcileArgsForCall []struct { + arg1 v1.Object + arg2 bool + } + reconcileReturns struct { + result1 error + } + reconcileReturnsOnCall map[int]struct { + result1 error + } + RestoreStateStub func(v1.Object) error + restoreStateMutex sync.RWMutex + restoreStateArgsForCall []struct { + arg1 v1.Object + } + restoreStateReturns struct { + result1 error + } + restoreStateReturnsOnCall map[int]struct { + result1 error + } + SetCustomNameStub func(string) + setCustomNameMutex sync.RWMutex + setCustomNameArgsForCall []struct { + arg1 string + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *ResourceManager) CheckState(arg1 v1.Object) error { + fake.checkStateMutex.Lock() + ret, specificReturn := fake.checkStateReturnsOnCall[len(fake.checkStateArgsForCall)] + fake.checkStateArgsForCall = append(fake.checkStateArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.CheckStateStub + fakeReturns := fake.checkStateReturns + fake.recordInvocation("CheckState", []interface{}{arg1}) + fake.checkStateMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ResourceManager) CheckStateCallCount() int { + fake.checkStateMutex.RLock() + defer fake.checkStateMutex.RUnlock() + return len(fake.checkStateArgsForCall) +} + +func (fake *ResourceManager) CheckStateCalls(stub func(v1.Object) error) { + fake.checkStateMutex.Lock() + defer fake.checkStateMutex.Unlock() + fake.CheckStateStub = stub +} + +func (fake *ResourceManager) CheckStateArgsForCall(i int) v1.Object { + fake.checkStateMutex.RLock() + defer fake.checkStateMutex.RUnlock() + argsForCall := fake.checkStateArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *ResourceManager) CheckStateReturns(result1 error) { + fake.checkStateMutex.Lock() + defer fake.checkStateMutex.Unlock() + fake.CheckStateStub = nil + fake.checkStateReturns = struct { + result1 error + }{result1} +} + +func (fake *ResourceManager) CheckStateReturnsOnCall(i int, result1 error) { + 
fake.checkStateMutex.Lock() + defer fake.checkStateMutex.Unlock() + fake.CheckStateStub = nil + if fake.checkStateReturnsOnCall == nil { + fake.checkStateReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.checkStateReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *ResourceManager) Delete(arg1 v1.Object) error { + fake.deleteMutex.Lock() + ret, specificReturn := fake.deleteReturnsOnCall[len(fake.deleteArgsForCall)] + fake.deleteArgsForCall = append(fake.deleteArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.DeleteStub + fakeReturns := fake.deleteReturns + fake.recordInvocation("Delete", []interface{}{arg1}) + fake.deleteMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ResourceManager) DeleteCallCount() int { + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + return len(fake.deleteArgsForCall) +} + +func (fake *ResourceManager) DeleteCalls(stub func(v1.Object) error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = stub +} + +func (fake *ResourceManager) DeleteArgsForCall(i int) v1.Object { + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + argsForCall := fake.deleteArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *ResourceManager) DeleteReturns(result1 error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = nil + fake.deleteReturns = struct { + result1 error + }{result1} +} + +func (fake *ResourceManager) DeleteReturnsOnCall(i int, result1 error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = nil + if fake.deleteReturnsOnCall == nil { + fake.deleteReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.deleteReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *ResourceManager) Exists(arg1 v1.Object) bool { + fake.existsMutex.Lock() + ret, specificReturn := fake.existsReturnsOnCall[len(fake.existsArgsForCall)] + fake.existsArgsForCall = append(fake.existsArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.ExistsStub + fakeReturns := fake.existsReturns + fake.recordInvocation("Exists", []interface{}{arg1}) + fake.existsMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ResourceManager) ExistsCallCount() int { + fake.existsMutex.RLock() + defer fake.existsMutex.RUnlock() + return len(fake.existsArgsForCall) +} + +func (fake *ResourceManager) ExistsCalls(stub func(v1.Object) bool) { + fake.existsMutex.Lock() + defer fake.existsMutex.Unlock() + fake.ExistsStub = stub +} + +func (fake *ResourceManager) ExistsArgsForCall(i int) v1.Object { + fake.existsMutex.RLock() + defer fake.existsMutex.RUnlock() + argsForCall := fake.existsArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *ResourceManager) ExistsReturns(result1 bool) { + fake.existsMutex.Lock() + defer fake.existsMutex.Unlock() + fake.ExistsStub = nil + fake.existsReturns = struct { + result1 bool + }{result1} +} + +func (fake *ResourceManager) ExistsReturnsOnCall(i int, result1 bool) { + fake.existsMutex.Lock() + defer fake.existsMutex.Unlock() + fake.ExistsStub = nil + if fake.existsReturnsOnCall == nil { + fake.existsReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.existsReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *ResourceManager) Get(arg1 
v1.Object) (client.Object, error) { + fake.getMutex.Lock() + ret, specificReturn := fake.getReturnsOnCall[len(fake.getArgsForCall)] + fake.getArgsForCall = append(fake.getArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.GetStub + fakeReturns := fake.getReturns + fake.recordInvocation("Get", []interface{}{arg1}) + fake.getMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *ResourceManager) GetCallCount() int { + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + return len(fake.getArgsForCall) +} + +func (fake *ResourceManager) GetCalls(stub func(v1.Object) (client.Object, error)) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = stub +} + +func (fake *ResourceManager) GetArgsForCall(i int) v1.Object { + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + argsForCall := fake.getArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *ResourceManager) GetReturns(result1 client.Object, result2 error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = nil + fake.getReturns = struct { + result1 client.Object + result2 error + }{result1, result2} +} + +func (fake *ResourceManager) GetReturnsOnCall(i int, result1 client.Object, result2 error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = nil + if fake.getReturnsOnCall == nil { + fake.getReturnsOnCall = make(map[int]struct { + result1 client.Object + result2 error + }) + } + fake.getReturnsOnCall[i] = struct { + result1 client.Object + result2 error + }{result1, result2} +} + +func (fake *ResourceManager) GetName(arg1 v1.Object) string { + fake.getNameMutex.Lock() + ret, specificReturn := fake.getNameReturnsOnCall[len(fake.getNameArgsForCall)] + fake.getNameArgsForCall = append(fake.getNameArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.GetNameStub + fakeReturns := fake.getNameReturns + fake.recordInvocation("GetName", []interface{}{arg1}) + fake.getNameMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ResourceManager) GetNameCallCount() int { + fake.getNameMutex.RLock() + defer fake.getNameMutex.RUnlock() + return len(fake.getNameArgsForCall) +} + +func (fake *ResourceManager) GetNameCalls(stub func(v1.Object) string) { + fake.getNameMutex.Lock() + defer fake.getNameMutex.Unlock() + fake.GetNameStub = stub +} + +func (fake *ResourceManager) GetNameArgsForCall(i int) v1.Object { + fake.getNameMutex.RLock() + defer fake.getNameMutex.RUnlock() + argsForCall := fake.getNameArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *ResourceManager) GetNameReturns(result1 string) { + fake.getNameMutex.Lock() + defer fake.getNameMutex.Unlock() + fake.GetNameStub = nil + fake.getNameReturns = struct { + result1 string + }{result1} +} + +func (fake *ResourceManager) GetNameReturnsOnCall(i int, result1 string) { + fake.getNameMutex.Lock() + defer fake.getNameMutex.Unlock() + fake.GetNameStub = nil + if fake.getNameReturnsOnCall == nil { + fake.getNameReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getNameReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *ResourceManager) Reconcile(arg1 v1.Object, arg2 bool) error { + fake.reconcileMutex.Lock() + ret, specificReturn := fake.reconcileReturnsOnCall[len(fake.reconcileArgsForCall)] + fake.reconcileArgsForCall = 
append(fake.reconcileArgsForCall, struct { + arg1 v1.Object + arg2 bool + }{arg1, arg2}) + stub := fake.ReconcileStub + fakeReturns := fake.reconcileReturns + fake.recordInvocation("Reconcile", []interface{}{arg1, arg2}) + fake.reconcileMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ResourceManager) ReconcileCallCount() int { + fake.reconcileMutex.RLock() + defer fake.reconcileMutex.RUnlock() + return len(fake.reconcileArgsForCall) +} + +func (fake *ResourceManager) ReconcileCalls(stub func(v1.Object, bool) error) { + fake.reconcileMutex.Lock() + defer fake.reconcileMutex.Unlock() + fake.ReconcileStub = stub +} + +func (fake *ResourceManager) ReconcileArgsForCall(i int) (v1.Object, bool) { + fake.reconcileMutex.RLock() + defer fake.reconcileMutex.RUnlock() + argsForCall := fake.reconcileArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *ResourceManager) ReconcileReturns(result1 error) { + fake.reconcileMutex.Lock() + defer fake.reconcileMutex.Unlock() + fake.ReconcileStub = nil + fake.reconcileReturns = struct { + result1 error + }{result1} +} + +func (fake *ResourceManager) ReconcileReturnsOnCall(i int, result1 error) { + fake.reconcileMutex.Lock() + defer fake.reconcileMutex.Unlock() + fake.ReconcileStub = nil + if fake.reconcileReturnsOnCall == nil { + fake.reconcileReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.reconcileReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *ResourceManager) RestoreState(arg1 v1.Object) error { + fake.restoreStateMutex.Lock() + ret, specificReturn := fake.restoreStateReturnsOnCall[len(fake.restoreStateArgsForCall)] + fake.restoreStateArgsForCall = append(fake.restoreStateArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.RestoreStateStub + fakeReturns := fake.restoreStateReturns + fake.recordInvocation("RestoreState", []interface{}{arg1}) + fake.restoreStateMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ResourceManager) RestoreStateCallCount() int { + fake.restoreStateMutex.RLock() + defer fake.restoreStateMutex.RUnlock() + return len(fake.restoreStateArgsForCall) +} + +func (fake *ResourceManager) RestoreStateCalls(stub func(v1.Object) error) { + fake.restoreStateMutex.Lock() + defer fake.restoreStateMutex.Unlock() + fake.RestoreStateStub = stub +} + +func (fake *ResourceManager) RestoreStateArgsForCall(i int) v1.Object { + fake.restoreStateMutex.RLock() + defer fake.restoreStateMutex.RUnlock() + argsForCall := fake.restoreStateArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *ResourceManager) RestoreStateReturns(result1 error) { + fake.restoreStateMutex.Lock() + defer fake.restoreStateMutex.Unlock() + fake.RestoreStateStub = nil + fake.restoreStateReturns = struct { + result1 error + }{result1} +} + +func (fake *ResourceManager) RestoreStateReturnsOnCall(i int, result1 error) { + fake.restoreStateMutex.Lock() + defer fake.restoreStateMutex.Unlock() + fake.RestoreStateStub = nil + if fake.restoreStateReturnsOnCall == nil { + fake.restoreStateReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.restoreStateReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *ResourceManager) SetCustomName(arg1 string) { + fake.setCustomNameMutex.Lock() + fake.setCustomNameArgsForCall = append(fake.setCustomNameArgsForCall, struct 
{ + arg1 string + }{arg1}) + stub := fake.SetCustomNameStub + fake.recordInvocation("SetCustomName", []interface{}{arg1}) + fake.setCustomNameMutex.Unlock() + if stub != nil { + fake.SetCustomNameStub(arg1) + } +} + +func (fake *ResourceManager) SetCustomNameCallCount() int { + fake.setCustomNameMutex.RLock() + defer fake.setCustomNameMutex.RUnlock() + return len(fake.setCustomNameArgsForCall) +} + +func (fake *ResourceManager) SetCustomNameCalls(stub func(string)) { + fake.setCustomNameMutex.Lock() + defer fake.setCustomNameMutex.Unlock() + fake.SetCustomNameStub = stub +} + +func (fake *ResourceManager) SetCustomNameArgsForCall(i int) string { + fake.setCustomNameMutex.RLock() + defer fake.setCustomNameMutex.RUnlock() + argsForCall := fake.setCustomNameArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *ResourceManager) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.checkStateMutex.RLock() + defer fake.checkStateMutex.RUnlock() + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + fake.existsMutex.RLock() + defer fake.existsMutex.RUnlock() + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + fake.getNameMutex.RLock() + defer fake.getNameMutex.RUnlock() + fake.reconcileMutex.RLock() + defer fake.reconcileMutex.RUnlock() + fake.restoreStateMutex.RLock() + defer fake.restoreStateMutex.RUnlock() + fake.setCustomNameMutex.RLock() + defer fake.setCustomNameMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *ResourceManager) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ resources.Manager = new(ResourceManager) diff --git a/pkg/manager/resources/orderernode/manager.go b/pkg/manager/resources/orderernode/manager.go new file mode 100644 index 00000000..490ddbca --- /dev/null +++ b/pkg/manager/resources/orderernode/manager.go @@ -0,0 +1,307 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package orderernode
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+ "regexp"
+
+ current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1"
+ k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient"
+ "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources"
+ "github.com/IBM-Blockchain/fabric-operator/pkg/operatorerrors"
+ "github.com/go-test/deep"
+ "github.com/pkg/errors"
+ k8serrors "k8s.io/apimachinery/pkg/api/errors"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/yaml"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ logf "sigs.k8s.io/controller-runtime/pkg/log"
+)
+
+var log = logf.Log.WithName("orderernode_manager")
+
+type Manager struct {
+ Client k8sclient.Client
+ Scheme *runtime.Scheme
+ OrdererNodeFile string
+ IgnoreDifferences []string
+ Name string
+
+ LabelsFunc func(v1.Object) map[string]string
+ OverrideFunc func(v1.Object, *current.IBPOrderer, resources.Action) error
+}
+
+func (m *Manager) GetName(instance v1.Object) string {
+ name := instance.GetName()
+ switch instance.(type) {
+ case *current.IBPOrderer:
+ ordererspec := instance.(*current.IBPOrderer)
+ if ordererspec.Spec.NodeNumber != nil {
+ name = fmt.Sprintf("%snode%d", instance.GetName(), *ordererspec.Spec.NodeNumber)
+ }
+ }
+ return GetName(name)
+}
+
+func (m *Manager) Reconcile(instance v1.Object, update bool) error {
+ name := m.GetName(instance)
+
+ orderernode := &current.IBPOrderer{}
+ err := m.Client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: instance.GetNamespace()}, orderernode)
+ if err != nil {
+ if k8serrors.IsNotFound(err) {
+ log.Info(fmt.Sprintf("Creating orderernode '%s'", name))
+ orderernode, err = m.GetOrdererNodeBasedOnCRFromFile(instance)
+ if err != nil {
+ return err
+ }
+
+ log.Info(fmt.Sprintf("Setting controller reference instance name: %s, orderernode name: %s", instance.GetName(), orderernode.GetName()))
+ err = m.Client.Create(context.TODO(), orderernode, k8sclient.CreateOption{Owner: instance, Scheme: m.Scheme})
+ if err != nil {
+ return err
+ }
+ return nil
+ }
+ return err
+ }
+
+ if update {
+ log.Info(fmt.Sprintf("Updating orderer node is not allowed programmatically '%s'", name))
+ return operatorerrors.New(operatorerrors.InvalidOrdererNodeUpdateRequest, "Updating orderer node is not allowed programmatically")
+ }
+
+ return nil
+}
+
+func (m *Manager) GetOrdererNodeBasedOnCRFromFile(instance v1.Object) (*current.IBPOrderer, error) {
+ orderernode, err := GetOrderernodeFromFile(m.OrdererNodeFile)
+ if err != nil {
+ log.Error(err, fmt.Sprintf("Error reading deployment configuration file: %s", m.OrdererNodeFile))
+ return nil, err
+ }
+
+ return m.BasedOnCR(instance, orderernode)
+}
+
+func (m *Manager) BasedOnCR(instance v1.Object, orderernode *current.IBPOrderer) (*current.IBPOrderer, error) {
+ if m.OverrideFunc != nil {
+ err := m.OverrideFunc(instance, orderernode, resources.Create)
+ if err != nil {
+ return nil, operatorerrors.New(operatorerrors.InvalidOrdererNodeCreateRequest, err.Error())
+ }
+ }
+
+ orderernode.Name = m.GetName(instance)
+ orderernode.Namespace = instance.GetNamespace()
+ orderernode.ObjectMeta.Name = m.GetName(instance)
+ orderernode.ObjectMeta.Namespace = instance.GetNamespace()
+
+ orderernode.Labels = m.LabelsFunc(instance)
+
+ return orderernode, nil
+}
+
+func (m *Manager) CheckState(instance v1.Object) error {
+ if instance == nil {
+ return nil //
Instance has not been reconciled yet
+ }
+
+ name := m.GetName(instance)
+
+ // Get the latest version of the instance
+ orderernode := &current.IBPOrderer{}
+ err := m.Client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: instance.GetNamespace()}, orderernode)
+ if err != nil {
+ return nil
+ }
+
+ copy := orderernode.DeepCopy()
+ expectedOrderernode, err := m.BasedOnCR(instance, copy)
+ if err != nil {
+ return err
+ }
+
+ deep.MaxDepth = 20
+ deep.MaxDiff = 30
+ deep.CompareUnexportedFields = true
+ deep.LogErrors = true
+
+ diff := deep.Equal(orderernode.Spec, expectedOrderernode.Spec)
+ if diff != nil {
+ err := m.ignoreDifferences(diff)
+ if err != nil {
+ return errors.Wrap(err, "orderernode has been edited manually, and does not match what is expected based on the CR")
+ }
+ }
+
+ return nil
+}
+
+func (m *Manager) RestoreState(instance v1.Object) error {
+ if instance == nil {
+ return nil // Instance has not been reconciled yet
+ }
+
+ name := m.GetName(instance)
+ orderernode := &current.IBPOrderer{}
+ err := m.Client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: instance.GetNamespace()}, orderernode)
+ if err != nil {
+ return nil
+ }
+
+ orderernode, err = m.BasedOnCR(instance, orderernode)
+ if err != nil {
+ return err
+ }
+
+ err = m.Client.Update(context.TODO(), orderernode)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *Manager) Get(instance v1.Object) (client.Object, error) {
+ if instance == nil {
+ return nil, nil // Instance has not been reconciled yet
+ }
+
+ name := m.GetName(instance)
+ orderernode := &current.IBPOrderer{}
+ err := m.Client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: instance.GetNamespace()}, orderernode)
+ if err != nil {
+ return nil, err
+ }
+
+ return orderernode, nil
+}
+
+func (m *Manager) Exists(instance v1.Object) bool {
+ _, err := m.Get(instance)
+ if err != nil {
+ return false
+ }
+
+ return true
+}
+
+func (m *Manager) Delete(instance v1.Object) error {
+ on, err := m.Get(instance)
+ if err != nil {
+ if !k8serrors.IsNotFound(err) {
+ return err
+ }
+ }
+
+ if on == nil {
+ return nil
+ }
+
+ err = m.Client.Delete(context.TODO(), on)
+ if err != nil {
+ if !k8serrors.IsNotFound(err) {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *Manager) getSelectorLabels(instance v1.Object) map[string]string {
+ return map[string]string{
+ "app": instance.GetName(),
+ }
+}
+
+func (m *Manager) ignoreDifferences(diff []string) error {
+ diffs := []string{}
+ for _, d := range diff {
+ found := false
+ for _, i := range m.differenceToIgnore() {
+ regex := regexp.MustCompile(i)
+ found = regex.MatchString(d)
+ if found {
+ break
+ }
+ }
+ if !found {
+ diffs = append(diffs, d)
+ return fmt.Errorf("unexpected mismatch: %s", d)
+ }
+ }
+ return nil
+}
+
+func (m *Manager) differenceToIgnore() []string {
+ d := []string{
+ "TypeMeta", "ObjectMeta",
+ }
+ d = append(d, m.IgnoreDifferences...)
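+ // These entries are treated as regular expressions and matched against the
+ // diff strings produced by deep.Equal in ignoreDifferences.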
+ return d
+}
+
+func (m *Manager) SetCustomName(name string) {
+ // NO-OP
+}
+
+func GetName(instanceName string, suffix ...string) string {
+ if len(suffix) != 0 {
+ if suffix[0] != "" {
+ return fmt.Sprintf("%s-%s", instanceName, suffix[0])
+ }
+ }
+ return instanceName
+}
+
+func GetOrderernodeFromFile(file string) (*current.IBPOrderer, error) {
+ jsonBytes, err := ConvertYamlFileToJson(file)
+ if err != nil {
+ return nil, err
+ }
+
+ on := &current.IBPOrderer{}
+ err = json.Unmarshal(jsonBytes, &on)
+ if err != nil {
+ return nil, err
+ }
+
+ return on, nil
+}
+
+func ConvertYamlFileToJson(file string) ([]byte, error) {
+ absfilepath, err := filepath.Abs(file)
+ if err != nil {
+ return nil, err
+ }
+ bytes, err := ioutil.ReadFile(filepath.Clean(absfilepath))
+ if err != nil {
+ return nil, err
+ }
+
+ return yaml.ToJSON(bytes)
+}
diff --git a/pkg/manager/resources/orderernode/manager_test.go b/pkg/manager/resources/orderernode/manager_test.go
new file mode 100644
index 00000000..e80b6e57
--- /dev/null
+++ b/pkg/manager/resources/orderernode/manager_test.go
@@ -0,0 +1,170 @@
+/*
+ * Copyright contributors to the Hyperledger Fabric Operator project
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package orderernode_test
+
+import (
+ "context"
+
+ current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1"
+ "github.com/IBM-Blockchain/fabric-operator/controllers/mocks"
+ "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources"
+ "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/orderernode"
+ . "github.com/onsi/ginkgo"
+ .
"github.com/onsi/gomega" + "github.com/pkg/errors" + k8serror "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = Describe("Orderernode manager", func() { + var ( + mockKubeClient *mocks.Client + manager *orderernode.Manager + instance metav1.Object + ) + + BeforeEach(func() { + mockKubeClient = &mocks.Client{} + mockKubeClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj client.Object) error { + switch obj.(type) { + case *current.IBPOrderer: + o := obj.(*current.IBPOrderer) + manager.BasedOnCR(instance, o) + } + return nil + } + + manager = &orderernode.Manager{ + OrdererNodeFile: "../../../../definitions/orderer/orderernode.yaml", + Client: mockKubeClient, + OverrideFunc: func(object v1.Object, d *current.IBPOrderer, action resources.Action) error { + return nil + }, + LabelsFunc: func(v1.Object) map[string]string { + return map[string]string{} + }, + } + + instance = &metav1.ObjectMeta{} + + }) + + Context("reconciles the orderernode instance", func() { + It("does not try to create orderernode if the get request returns an error other than 'not found'", func() { + errMsg := "connection refused" + mockKubeClient.GetReturns(errors.New(errMsg)) + err := manager.Reconcile(instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(errMsg)) + }) + + When("orderernode does not exist", func() { + BeforeEach(func() { + notFoundErr := &k8serror.StatusError{ + ErrStatus: metav1.Status{ + Reason: metav1.StatusReasonNotFound, + }, + } + mockKubeClient.GetReturns(notFoundErr) + }) + + It("returns an error if the creation of the Orderernode fails", func() { + errMsg := "unable to create orderernode" + mockKubeClient.CreateReturns(errors.New(errMsg)) + err := manager.Reconcile(instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(errMsg)) + }) + + It("does not return an error on a successfull Orderernode creation", func() { + err := manager.Reconcile(instance, false) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + When("orderernode already exists", func() { + It("returns an error if orderernode is updated", func() { + errMsg := "Updating orderer node is not allowed programmatically" + err := manager.Reconcile(instance, true) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring(errMsg)) + }) + }) + }) + + Context("check orderernode state", func() { + // TODO fix this test + // It("returns an error if an unexpected change in orderernode is detected", func() { + // num := 1 + // dep := ¤t.IBPOrderer{ + // Spec: current.IBPOrdererSpec{ + // NodeNumber: &num, + // }, + // } + // mockKubeClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj runtime.Object) error { + // switch obj.(type) { + // case *current.IBPOrderer: + // on := obj.(*current.IBPOrderer) + // on.Spec = current.IBPOrdererSpec{ + // NodeNumber: &num, + // Arch: []string{"s390x"}, + // } + // } + // return nil + // } + + // err := manager.CheckState(dep) + // Expect(err).To(HaveOccurred()) + // Expect(err.Error()).To(ContainSubstring("orderernode has been edited manually, and does not match what is expected based on the CR: unexpected mismatch")) + // }) + + // It("returns no error if no changes detected for orderernode", func() { + // err := manager.CheckState(&appsv1.Deployment{}) + // Expect(err).NotTo(HaveOccurred()) + // }) + }) + + Context("restore 
orderernode state", func() {
+ It("returns an error if restoring the orderernode state fails", func() {
+ errMsg := "unable to restore orderernode"
+ mockKubeClient.UpdateReturns(errors.New(errMsg))
+ num := 1
+ err := manager.RestoreState(&current.IBPOrderer{
+ Spec: current.IBPOrdererSpec{
+ NodeNumber: &num,
+ },
+ })
+ Expect(err).To(HaveOccurred())
+ Expect(err.Error()).To(Equal(errMsg))
+ })
+
+ It("returns no error if able to restore orderernode state", func() {
+ num := 1
+ err := manager.RestoreState(&current.IBPOrderer{
+ Spec: current.IBPOrdererSpec{
+ NodeNumber: &num,
+ },
+ })
+ Expect(err).NotTo(HaveOccurred())
+ })
+ })
+})
diff --git a/pkg/manager/resources/orderernode/orderernode_suite_test.go b/pkg/manager/resources/orderernode/orderernode_suite_test.go
new file mode 100644
index 00000000..e193d321
--- /dev/null
+++ b/pkg/manager/resources/orderernode/orderernode_suite_test.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright contributors to the Hyperledger Fabric Operator project
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package orderernode_test
+
+import (
+ "testing"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+func TestDeployment(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "Deployment Suite")
+}
diff --git a/pkg/manager/resources/pv/manager.go b/pkg/manager/resources/pv/manager.go
new file mode 100644
index 00000000..b3f11376
--- /dev/null
+++ b/pkg/manager/resources/pv/manager.go
@@ -0,0 +1,158 @@
+/*
+ * Copyright contributors to the Hyperledger Fabric Operator project
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package pv
+
+import (
+ "context"
+ "fmt"
+
+ k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient"
+ "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources"
+ "github.com/IBM-Blockchain/fabric-operator/pkg/operatorerrors"
+ corev1 "k8s.io/api/core/v1"
+ k8serrors "k8s.io/apimachinery/pkg/api/errors"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ logf "sigs.k8s.io/controller-runtime/pkg/log"
+)
+
+var log = logf.Log.WithName("pv_manager")
+
+type Manager struct {
+ Client k8sclient.Client
+ Scheme *runtime.Scheme
+ Name string
+
+ LabelsFunc func(v1.Object) map[string]string
+ OverrideFunc func(v1.Object, *corev1.PersistentVolume, resources.Action) error
+}
+
+func (m *Manager) GetName(instance v1.Object) string {
+ return fmt.Sprintf("%s-%s", instance.GetNamespace(), instance.GetName())
+}
+
+func (m *Manager) Reconcile(instance v1.Object, update bool) error {
+ name := m.GetName(instance)
+ err := m.Client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: instance.GetNamespace()}, &corev1.PersistentVolume{})
+ if err != nil {
+ if k8serrors.IsNotFound(err) {
+ log.Info(fmt.Sprintf("Creating pv '%s'", name))
+ pv, err := m.GetPVFromTemplate(instance)
+ if err != nil {
+ return err
+ }
+
+ err = m.Client.Create(context.TODO(), pv, k8sclient.CreateOption{Owner: instance, Scheme: m.Scheme})
+ if err != nil {
+ return err
+ }
+ return nil
+ }
+ return err
+ }
+
+ // TODO: If needed, update logic for the PV goes here
+
+ return nil
+}
+
+func (m *Manager) GetPVFromTemplate(instance v1.Object) (*corev1.PersistentVolume, error) {
+ pvc := &corev1.PersistentVolume{
+ ObjectMeta: v1.ObjectMeta{
+ Name: m.GetName(instance),
+ Namespace: instance.GetNamespace(),
+ Labels: m.LabelsFunc(instance),
+ },
+ }
+
+ return m.BasedOnCR(instance, pvc)
+}
+
+func (m *Manager) BasedOnCR(instance v1.Object, pvc *corev1.PersistentVolume) (*corev1.PersistentVolume, error) {
+ if m.OverrideFunc != nil {
+ err := m.OverrideFunc(instance, pvc, resources.Create)
+ if err != nil {
+ return nil, operatorerrors.New(operatorerrors.InvalidPVCCreateRequest, err.Error())
+ }
+ }
+
+ return pvc, nil
+}
+
+func (m *Manager) Get(instance v1.Object) (client.Object, error) {
+ if instance == nil {
+ return nil, nil // Instance has not been reconciled yet
+ }
+
+ name := m.GetName(instance)
+ pvc := &corev1.PersistentVolume{}
+ err := m.Client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: instance.GetNamespace()}, pvc)
+ if err != nil {
+ return nil, err
+ }
+
+ return pvc, nil
+}
+
+func (m *Manager) Exists(instance v1.Object) bool {
+ _, err := m.Get(instance)
+ if err != nil {
+ return false
+ }
+
+ return true
+}
+
+func (m *Manager) Delete(instance v1.Object) error {
+ pvc, err := m.Get(instance)
+ if err != nil {
+ if !k8serrors.IsNotFound(err) {
+ return err
+ }
+ }
+
+ if pvc == nil {
+ return nil
+ }
+
+ err = m.Client.Delete(context.TODO(), pvc)
+ if err != nil {
+ if !k8serrors.IsNotFound(err) {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *Manager) CheckState(instance v1.Object) error {
+ // NO-OP
+ return nil
+}
+
+func (m *Manager) RestoreState(instance v1.Object) error {
+ // NO-OP
+ return nil
+}
+
+func (m *Manager) SetCustomName(name string) {
+ // NO-OP
+}
diff --git a/pkg/manager/resources/pv/manager_test.go b/pkg/manager/resources/pv/manager_test.go
new file mode 100644
index 00000000..edc2c0ac
--- /dev/null
+++
b/pkg/manager/resources/pv/manager_test.go
@@ -0,0 +1,106 @@
+/*
+ * Copyright contributors to the Hyperledger Fabric Operator project
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package pv_test
+
+import (
+ "github.com/IBM-Blockchain/fabric-operator/controllers/mocks"
+ "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources"
+ "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/pvc"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+ "github.com/pkg/errors"
+ corev1 "k8s.io/api/core/v1"
+ k8serror "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+var _ = Describe("PVC manager", func() {
+ var (
+ mockKubeClient *mocks.Client
+ manager *pvc.Manager
+ instance metav1.Object
+ )
+
+ BeforeEach(func() {
+ mockKubeClient = &mocks.Client{}
+ manager = &pvc.Manager{
+ PVCFile: "../../../../definitions/ca/pvc.yaml",
+ Client: mockKubeClient,
+ OverrideFunc: func(v1.Object, *corev1.PersistentVolumeClaim, resources.Action) error {
+ return nil
+ },
+ LabelsFunc: func(v1.Object) map[string]string {
+ return map[string]string{}
+ },
+ }
+
+ instance = &metav1.ObjectMeta{}
+ })
+
+ Context("reconciles the pvc instance", func() {
+ It("does not try to create pvc if the get request returns an error other than 'not found'", func() {
+ errMsg := "connection refused"
+ mockKubeClient.GetReturns(errors.New(errMsg))
+ err := manager.Reconcile(instance, false)
+ Expect(err).To(HaveOccurred())
+ Expect(err.Error()).To(Equal(errMsg))
+ })
+
+ When("pvc does not exist", func() {
+ BeforeEach(func() {
+ notFoundErr := &k8serror.StatusError{
+ ErrStatus: metav1.Status{
+ Reason: metav1.StatusReasonNotFound,
+ },
+ }
+ mockKubeClient.GetReturns(notFoundErr)
+ })
+
+ It("returns an error if fails to load default config", func() {
+ manager.PVCFile = "bad.yaml"
+ err := manager.Reconcile(instance, false)
+ Expect(err).To(HaveOccurred())
+ Expect(err.Error()).To(ContainSubstring("no such file or directory"))
+ })
+
+ It("returns an error if override pvc value fails", func() {
+ manager.OverrideFunc = func(v1.Object, *corev1.PersistentVolumeClaim, resources.Action) error {
+ return errors.New("creation override failed")
+ }
+ err := manager.Reconcile(instance, false)
+ Expect(err).To(HaveOccurred())
+ Expect(err.Error()).Should(ContainSubstring("creation override failed"))
+ })
+
+ It("returns an error if the creation of the PVC fails", func() {
+ errMsg := "unable to create pvc"
+ mockKubeClient.CreateReturns(errors.New(errMsg))
+ err := manager.Reconcile(instance, false)
+ Expect(err).To(HaveOccurred())
+ Expect(err.Error()).To(Equal(errMsg))
+ })
+
+ It("does not return an error on a successful PVC creation", func() {
+ err := manager.Reconcile(instance, false)
+ Expect(err).NotTo(HaveOccurred())
+ })
+ })
+ })
+})
diff --git a/pkg/manager/resources/pv/pvc_suite_test.go b/pkg/manager/resources/pv/pvc_suite_test.go
new file mode 100644
index 00000000..c65db070 --- /dev/null +++ b/pkg/manager/resources/pv/pvc_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package pv_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestPvc(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Pvc Suite") +} diff --git a/pkg/manager/resources/pvc/manager.go b/pkg/manager/resources/pvc/manager.go new file mode 100644 index 00000000..3dd52b25 --- /dev/null +++ b/pkg/manager/resources/pvc/manager.go @@ -0,0 +1,172 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package pvc
+
+import (
+ "context"
+ "fmt"
+
+ k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient"
+ "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources"
+ "github.com/IBM-Blockchain/fabric-operator/pkg/operatorerrors"
+ "github.com/IBM-Blockchain/fabric-operator/pkg/util"
+ corev1 "k8s.io/api/core/v1"
+ k8serrors "k8s.io/apimachinery/pkg/api/errors"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ logf "sigs.k8s.io/controller-runtime/pkg/log"
+)
+
+var log = logf.Log.WithName("pvc_manager")
+
+type Manager struct {
+ Client k8sclient.Client
+ Scheme *runtime.Scheme
+ PVCFile string
+ Name string
+ CustomName string
+
+ LabelsFunc func(v1.Object) map[string]string
+ OverrideFunc func(v1.Object, *corev1.PersistentVolumeClaim, resources.Action) error
+}
+
+func (m *Manager) GetName(instance v1.Object) string {
+ if m.CustomName != "" {
+ return m.CustomName
+ }
+
+ if m.Name != "" {
+ return fmt.Sprintf("%s-%s-pvc", instance.GetName(), m.Name)
+ }
+
+ return fmt.Sprintf("%s-pvc", instance.GetName())
+}
+
+func (m *Manager) Reconcile(instance v1.Object, update bool) error {
+ name := m.GetName(instance)
+
+ err := m.Client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: instance.GetNamespace()}, &corev1.PersistentVolumeClaim{})
+ if err != nil {
+ if k8serrors.IsNotFound(err) {
+ log.Info(fmt.Sprintf("Creating pvc '%s'", name))
+ pvc, err := m.GetPVCBasedOnCRFromFile(instance)
+ if err != nil {
+ return err
+ }
+
+ err = m.Client.Create(context.TODO(), pvc, k8sclient.CreateOption{Owner: instance, Scheme: m.Scheme})
+ if err != nil {
+ return err
+ }
+ return nil
+ }
+ return err
+ }
+
+ // TODO: If needed, update logic for the PVC goes here
+
+ return nil
+}
+
+func (m *Manager) GetPVCBasedOnCRFromFile(instance v1.Object) (*corev1.PersistentVolumeClaim, error) {
+ pvc, err := util.GetPVCFromFile(m.PVCFile)
+ if err != nil {
+ log.Error(err, fmt.Sprintf("Error reading pvc configuration file: %s", m.PVCFile))
+ return nil, err
+ }
+
+ pvc.Name = m.GetName(instance)
+ pvc.Namespace = instance.GetNamespace()
+ pvc.Labels = m.LabelsFunc(instance)
+
+ return m.BasedOnCR(instance, pvc)
+}
+
+func (m *Manager) BasedOnCR(instance v1.Object, pvc *corev1.PersistentVolumeClaim) (*corev1.PersistentVolumeClaim, error) {
+ if m.OverrideFunc != nil {
+ err := m.OverrideFunc(instance, pvc, resources.Create)
+ if err != nil {
+ return nil, operatorerrors.New(operatorerrors.InvalidPVCCreateRequest, err.Error())
+ }
+ }
+
+ return pvc, nil
+}
+
+func (m *Manager) Get(instance v1.Object) (client.Object, error) {
+ if instance == nil {
+ return nil, nil // Instance has not been reconciled yet
+ }
+
+ name := m.GetName(instance)
+ pvc := &corev1.PersistentVolumeClaim{}
+ err := m.Client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: instance.GetNamespace()}, pvc)
+ if err != nil {
+ return nil, err
+ }
+
+ return pvc, nil
+}
+
+func (m *Manager) Exists(instance v1.Object) bool {
+ _, err := m.Get(instance)
+ if err != nil {
+ return false
+ }
+
+ return true
+}
+
+func (m *Manager) Delete(instance v1.Object) error {
+ pvc, err := m.Get(instance)
+ if err != nil {
+ if !k8serrors.IsNotFound(err) {
+ return err
+ }
+ }
+
+ if pvc == nil {
+ return nil
+ }
+
+ err = m.Client.Delete(context.TODO(), pvc)
+ if err != nil {
+ if !k8serrors.IsNotFound(err) {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *Manager)
CheckState(instance v1.Object) error { + // NO-OP + return nil +} + +func (m *Manager) RestoreState(instance v1.Object) error { + // NO-OP + return nil +} + +func (m *Manager) SetCustomName(name string) { + m.CustomName = name +} diff --git a/pkg/manager/resources/pvc/manager_test.go b/pkg/manager/resources/pvc/manager_test.go new file mode 100644 index 00000000..83e8d9cb --- /dev/null +++ b/pkg/manager/resources/pvc/manager_test.go @@ -0,0 +1,106 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package pvc_test + +import ( + "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/pvc" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + k8serror "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var _ = Describe("PVC manager", func() { + var ( + mockKubeClient *mocks.Client + manager *pvc.Manager + instance metav1.Object + ) + + BeforeEach(func() { + mockKubeClient = &mocks.Client{} + manager = &pvc.Manager{ + PVCFile: "../../../../definitions/ca/pvc.yaml", + Client: mockKubeClient, + OverrideFunc: func(v1.Object, *corev1.PersistentVolumeClaim, resources.Action) error { + return nil + }, + LabelsFunc: func(v1.Object) map[string]string { + return map[string]string{} + }, + } + + instance = &metav1.ObjectMeta{} + }) + + Context("reconciles the pvc instance", func() { + It("does not try to create pvc if the get request returns an error other than 'not found'", func() { + errMsg := "connection refused" + mockKubeClient.GetReturns(errors.New(errMsg)) + err := manager.Reconcile(instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(errMsg)) + }) + + When("pvc does not exist", func() { + BeforeEach(func() { + notFoundErr := &k8serror.StatusError{ + ErrStatus: metav1.Status{ + Reason: metav1.StatusReasonNotFound, + }, + } + mockKubeClient.GetReturns(notFoundErr) + }) + + It("returns an error if fails to load default config", func() { + manager.PVCFile = "bad.yaml" + err := manager.Reconcile(instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("no such file or directory")) + }) + + It("returns an error if override pvc value fails", func() { + manager.OverrideFunc = func(v1.Object, *corev1.PersistentVolumeClaim, resources.Action) error { + return errors.New("creation override failed") + } + err := manager.Reconcile(instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("creation override failed")) + }) + + It("returns an error if the creation of the PVC fails", func() { + errMsg := "unable to create pvc" + mockKubeClient.CreateReturns(errors.New(errMsg)) + err := 
manager.Reconcile(instance, false)
+ Expect(err).To(HaveOccurred())
+ Expect(err.Error()).To(Equal(errMsg))
+ })
+
+ It("does not return an error on a successful PVC creation", func() {
+ err := manager.Reconcile(instance, false)
+ Expect(err).NotTo(HaveOccurred())
+ })
+ })
+ })
+})
diff --git a/pkg/manager/resources/pvc/pvc_suite_test.go b/pkg/manager/resources/pvc/pvc_suite_test.go
new file mode 100644
index 00000000..367b23bd
--- /dev/null
+++ b/pkg/manager/resources/pvc/pvc_suite_test.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright contributors to the Hyperledger Fabric Operator project
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package pvc_test
+
+import (
+ "testing"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+func TestPvc(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "Pvc Suite")
+}
diff --git a/pkg/manager/resources/resources.go b/pkg/manager/resources/resources.go
new file mode 100644
index 00000000..70cbd365
--- /dev/null
+++ b/pkg/manager/resources/resources.go
@@ -0,0 +1,45 @@
+/*
+ * Copyright contributors to the Hyperledger Fabric Operator project
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package resources
+
+import (
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+type Action string
+
+const (
+ Create Action = "CREATE"
+ Update Action = "UPDATE"
+ Restart Action = "RESTART"
+)
+
+//go:generate counterfeiter -o mocks/resource_manager.go -fake-name ResourceManager . Manager
+
+type Manager interface {
+ Reconcile(v1.Object, bool) error
+ CheckState(v1.Object) error
+ RestoreState(v1.Object) error
+ Exists(v1.Object) bool
+ Get(v1.Object) (client.Object, error)
+ Delete(v1.Object) error
+ GetName(v1.Object) string
+ SetCustomName(string)
+}
diff --git a/pkg/manager/resources/role/manager.go b/pkg/manager/resources/role/manager.go
new file mode 100644
index 00000000..b614812f
--- /dev/null
+++ b/pkg/manager/resources/role/manager.go
@@ -0,0 +1,171 @@
+/*
+ * Copyright contributors to the Hyperledger Fabric Operator project
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package role + +import ( + "context" + "fmt" + + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/operatorerrors" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + rbacv1 "k8s.io/api/rbac/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var log = logf.Log.WithName("role_manager") + +type Manager struct { + Client k8sclient.Client + Scheme *runtime.Scheme + RoleFile string + Name string + + LabelsFunc func(v1.Object) map[string]string + OverrideFunc func(v1.Object, *rbacv1.Role, resources.Action) error +} + +func (m *Manager) GetName(instance v1.Object) string { + return GetName(instance.GetName(), m.Name) +} + +func (m *Manager) Reconcile(instance v1.Object, update bool) error { + name := m.GetName(instance) + err := m.Client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: instance.GetNamespace()}, &rbacv1.Role{}) + if err != nil { + if k8serrors.IsNotFound(err) { + log.Info(fmt.Sprintf("Creating role '%s'", name)) + role, err := m.GetRoleBasedOnCRFromFile(instance) + if err != nil { + return err + } + + err = m.Client.Create(context.TODO(), role, k8sclient.CreateOption{Owner: instance, Scheme: m.Scheme}) + if err != nil { + return err + } + return nil + } + return err + } + + // TODO: If needed, update logic for servie goes here + + return nil +} + +func (m *Manager) GetRoleBasedOnCRFromFile(instance v1.Object) (*rbacv1.Role, error) { + role, err := util.GetRoleFromFile(m.RoleFile) + if err != nil { + log.Error(err, fmt.Sprintf("Error reading role configuration file: %s", m.RoleFile)) + return nil, err + } + + role.Name = m.GetName(instance) + role.Namespace = instance.GetNamespace() + role.Labels = m.LabelsFunc(instance) + + return m.BasedOnCR(instance, role) +} + +func (m *Manager) BasedOnCR(instance v1.Object, role *rbacv1.Role) (*rbacv1.Role, error) { + if m.OverrideFunc != nil { + err := m.OverrideFunc(instance, role, resources.Create) + if err != nil { + return nil, operatorerrors.New(operatorerrors.InvalidRoleCreateRequest, err.Error()) + } + } + + return role, nil +} + +func (m *Manager) Get(instance v1.Object) (client.Object, error) { + if instance == nil { + return nil, nil // Instance has not been reconciled yet + } + + name := m.GetName(instance) + role := &rbacv1.Role{} + err := m.Client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: instance.GetNamespace()}, role) + if err != nil { + return nil, err + } + + return role, nil +} + +func (m *Manager) Exists(instance v1.Object) bool { + _, err := m.Get(instance) + if err != nil { + return false + } + + return true +} + +func (m *Manager) Delete(instance v1.Object) error { + role, err := m.Get(instance) + if err != nil { + if !k8serrors.IsNotFound(err) { + return err + } + } + + if role == nil { + return 
nil + } + + err = m.Client.Delete(context.TODO(), role) + if err != nil { + if !k8serrors.IsNotFound(err) { + return err + } + } + + return nil +} + +func (m *Manager) CheckState(instance v1.Object) error { + // NO-OP + return nil +} + +func (m *Manager) RestoreState(instance v1.Object) error { + // NO-OP + return nil +} + +func (m *Manager) SetCustomName(name string) { + // NO-OP +} + +func GetName(instanceName string, suffix ...string) string { + if len(suffix) != 0 { + if suffix[0] != "" { + return fmt.Sprintf("%s-%s-role", instanceName, suffix[0]) + } + } + return fmt.Sprintf("%s-role", instanceName) +} diff --git a/pkg/manager/resources/role/manager_test.go b/pkg/manager/resources/role/manager_test.go new file mode 100644 index 00000000..131b4864 --- /dev/null +++ b/pkg/manager/resources/role/manager_test.go @@ -0,0 +1,106 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package role_test + +import ( + "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/role" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/pkg/errors" + rbacv1 "k8s.io/api/rbac/v1" + k8serror "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var _ = Describe("Role manager", func() { + var ( + mockKubeClient *mocks.Client + manager *role.Manager + instance metav1.Object + ) + + BeforeEach(func() { + mockKubeClient = &mocks.Client{} + manager = &role.Manager{ + RoleFile: "../../../../definitions/ca/role.yaml", + Client: mockKubeClient, + OverrideFunc: func(v1.Object, *rbacv1.Role, resources.Action) error { + return nil + }, + LabelsFunc: func(v1.Object) map[string]string { + return map[string]string{} + }, + } + + instance = &metav1.ObjectMeta{} + }) + + Context("reconciles the instance", func() { + It("does not try to create role if the get request returns an error other than 'not found'", func() { + errMsg := "connection refused" + mockKubeClient.GetReturns(errors.New(errMsg)) + err := manager.Reconcile(instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(errMsg)) + }) + + When("role does not exist", func() { + BeforeEach(func() { + notFoundErr := &k8serror.StatusError{ + ErrStatus: metav1.Status{ + Reason: metav1.StatusReasonNotFound, + }, + } + mockKubeClient.GetReturns(notFoundErr) + }) + + It("returns an error if fails to load default config", func() { + manager.RoleFile = "bad.yaml" + err := manager.Reconcile(instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("no such file or directory")) + }) + + It("returns an error if override role value fails", func() { + manager.OverrideFunc = func(v1.Object, *rbacv1.Role, resources.Action) error { + return errors.New("creation override failed") + } + err := manager.Reconcile(instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("creation override failed")) + }) + + It("returns an error if the creation of the role fails", func() { + errMsg := "unable to create role" + mockKubeClient.CreateReturns(errors.New(errMsg)) + err := manager.Reconcile(instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(errMsg)) + }) + + It("does not return an error on a successfull role creation", func() { + err := manager.Reconcile(instance, false) + Expect(err).NotTo(HaveOccurred()) + }) + }) + }) +}) diff --git a/pkg/manager/resources/role/role_suite_test.go b/pkg/manager/resources/role/role_suite_test.go new file mode 100644 index 00000000..1dd291d7 --- /dev/null +++ b/pkg/manager/resources/role/role_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package role_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +func TestRole(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Role Suite") +} diff --git a/pkg/manager/resources/rolebinding/manager.go b/pkg/manager/resources/rolebinding/manager.go new file mode 100644 index 00000000..889ad0a2 --- /dev/null +++ b/pkg/manager/resources/rolebinding/manager.go @@ -0,0 +1,171 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package rolebinding + +import ( + "context" + "fmt" + + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/role" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/serviceaccount" + "github.com/IBM-Blockchain/fabric-operator/pkg/operatorerrors" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + rbacv1 "k8s.io/api/rbac/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var log = logf.Log.WithName("rolebinding_manager") + +type Manager struct { + Client k8sclient.Client + Scheme *runtime.Scheme + RoleBindingFile string + Name string + + LabelsFunc func(v1.Object) map[string]string + OverrideFunc func(v1.Object, *rbacv1.RoleBinding, resources.Action) error +} + +func (m *Manager) GetName(instance v1.Object) string { + if m.Name != "" { + return fmt.Sprintf("%s-%s-rolebinding", instance.GetName(), m.Name) + } + return fmt.Sprintf("%s-rolebinding", instance.GetName()) +} + +func (m *Manager) Reconcile(instance v1.Object, update bool) error { + name := m.GetName(instance) + err := m.Client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: instance.GetNamespace()}, &rbacv1.RoleBinding{}) + if err != nil { + if k8serrors.IsNotFound(err) { + log.Info(fmt.Sprintf("Creating role binding '%s'", name)) + roleBinding, err := m.GetRoleBindingBasedOnCRFromFile(instance) + if err != nil { + return err + } + + err = m.Client.Create(context.TODO(), roleBinding, k8sclient.CreateOption{Owner: instance, Scheme: m.Scheme}) + if err != nil { + return err + } + return nil + } + return err + } + + // TODO: If needed, update logic for servie goes here + + return nil +} + +func (m *Manager) GetRoleBindingBasedOnCRFromFile(instance v1.Object) (*rbacv1.RoleBinding, error) { + roleBinding, err := util.GetRoleBindingFromFile(m.RoleBindingFile) + if err != nil { + log.Error(err, fmt.Sprintf("Error reading role binding configuration file: %s", m.RoleBindingFile)) + return nil, err + } + + name := m.GetName(instance) + roleBinding.Name = name + roleBinding.RoleRef.Name = role.GetName(instance.GetName()) + roleBinding.Namespace = instance.GetNamespace() + roleBinding.Labels = 
m.LabelsFunc(instance) + roleBinding.Subjects[0].Name = serviceaccount.GetName(instance.GetName()) + roleBinding.Subjects[0].Namespace = instance.GetNamespace() + + return m.BasedOnCR(instance, roleBinding) +} + +func (m *Manager) BasedOnCR(instance v1.Object, roleBinding *rbacv1.RoleBinding) (*rbacv1.RoleBinding, error) { + if m.OverrideFunc != nil { + err := m.OverrideFunc(instance, roleBinding, resources.Create) + if err != nil { + return nil, operatorerrors.New(operatorerrors.InvalidRoleBindingCreateRequest, err.Error()) + } + } + + return roleBinding, nil +} + +func (m *Manager) Get(instance v1.Object) (client.Object, error) { + if instance == nil { + return nil, nil // Instance has not been reconciled yet + } + + name := m.GetName(instance) + rb := &rbacv1.RoleBinding{} + err := m.Client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: instance.GetNamespace()}, rb) + if err != nil { + return nil, err + } + + return rb, nil +} + +func (m *Manager) Exists(instance v1.Object) bool { + _, err := m.Get(instance) + if err != nil { + return false + } + + return true +} + +func (m *Manager) Delete(instance v1.Object) error { + rb, err := m.Get(instance) + if err != nil { + if !k8serrors.IsNotFound(err) { + return err + } + } + + if rb == nil { + return nil + } + + err = m.Client.Delete(context.TODO(), rb) + if err != nil { + if !k8serrors.IsNotFound(err) { + return err + } + } + + return nil +} + +func (m *Manager) CheckState(instance v1.Object) error { + // NO-OP + return nil +} + +func (m *Manager) RestoreState(instance v1.Object) error { + // NO-OP + return nil +} + +func (m *Manager) SetCustomName(name string) { + // NO-OP +} diff --git a/pkg/manager/resources/rolebinding/manager_test.go b/pkg/manager/resources/rolebinding/manager_test.go new file mode 100644 index 00000000..18f90525 --- /dev/null +++ b/pkg/manager/resources/rolebinding/manager_test.go @@ -0,0 +1,106 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package rolebinding_test + +import ( + "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/rolebinding" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/pkg/errors" + rbacv1 "k8s.io/api/rbac/v1" + k8serror "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var _ = Describe("Role Binding manager", func() { + var ( + mockKubeClient *mocks.Client + manager *rolebinding.Manager + instance metav1.Object + ) + + BeforeEach(func() { + mockKubeClient = &mocks.Client{} + manager = &rolebinding.Manager{ + RoleBindingFile: "../../../../definitions/ca/rolebinding.yaml", + Client: mockKubeClient, + OverrideFunc: func(v1.Object, *rbacv1.RoleBinding, resources.Action) error { + return nil + }, + LabelsFunc: func(v1.Object) map[string]string { + return map[string]string{} + }, + } + + instance = &metav1.ObjectMeta{} + }) + + Context("reconciles the instance", func() { + It("does not try to create role binding if the get request returns an error other than 'not found'", func() { + errMsg := "connection refused" + mockKubeClient.GetReturns(errors.New(errMsg)) + err := manager.Reconcile(instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(errMsg)) + }) + + When("role binding does not exist", func() { + BeforeEach(func() { + notFoundErr := &k8serror.StatusError{ + ErrStatus: metav1.Status{ + Reason: metav1.StatusReasonNotFound, + }, + } + mockKubeClient.GetReturns(notFoundErr) + }) + + It("returns an error if fails to load default config", func() { + manager.RoleBindingFile = "bad.yaml" + err := manager.Reconcile(instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("no such file or directory")) + }) + + It("returns an error if override role binding value fails", func() { + manager.OverrideFunc = func(v1.Object, *rbacv1.RoleBinding, resources.Action) error { + return errors.New("creation override failed") + } + err := manager.Reconcile(instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("creation override failed")) + }) + + It("returns an error if the creation of the role binding fails", func() { + errMsg := "unable to create role binding" + mockKubeClient.CreateReturns(errors.New(errMsg)) + err := manager.Reconcile(instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(errMsg)) + }) + + It("does not return an error on a successfull role creation", func() { + err := manager.Reconcile(instance, false) + Expect(err).NotTo(HaveOccurred()) + }) + }) + }) +}) diff --git a/pkg/manager/resources/rolebinding/rolebinding_suite_test.go b/pkg/manager/resources/rolebinding/rolebinding_suite_test.go new file mode 100644 index 00000000..9275964e --- /dev/null +++ b/pkg/manager/resources/rolebinding/rolebinding_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package rolebinding_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +func TestRolebinding(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Rolebinding Suite") +} diff --git a/pkg/manager/resources/route/manager.go b/pkg/manager/resources/route/manager.go new file mode 100644 index 00000000..9efe8b43 --- /dev/null +++ b/pkg/manager/resources/route/manager.go @@ -0,0 +1,182 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package route + +import ( + "context" + "fmt" + + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + routev1 "github.com/openshift/api/route/v1" + "github.com/pkg/errors" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var log = logf.Log.WithName("route_manager") + +type Manager struct { + Client k8sclient.Client + Scheme *runtime.Scheme + RouteFile string + Name string + + LabelsFunc func(v1.Object) map[string]string + OverrideFunc func(v1.Object, *routev1.Route, resources.Action) error +} + +func (m *Manager) GetName(instance v1.Object) string { + if m.Name != "" { + return GetName(instance, m.Name) + } + return GetName(instance) +} + +func (m *Manager) Reconcile(instance v1.Object, update bool) error { + name := m.GetName(instance) + route := &routev1.Route{ + TypeMeta: v1.TypeMeta{ + APIVersion: "route.openshift.io/v1", + Kind: "Route", + }, + } + err := m.Client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: instance.GetNamespace()}, route) + if err != nil { + if k8serrors.IsNotFound(err) { + log.Info(fmt.Sprintf("Creating route '%s'", name)) + route, err := m.GetRouteBasedOnCRFromFile(instance) + if err != nil { + return err + } + route.TypeMeta.APIVersion = "route.openshift.io/v1" + route.TypeMeta.Kind = "Route" + + err = m.Client.Create(context.TODO(), route, k8sclient.CreateOption{Owner: instance, Scheme: m.Scheme}) + if err != nil { + return err + } + return nil + } + return err + } + + // TODO: If needed, update logic for route goes here + + return nil +} + +func (m *Manager) GetRouteBasedOnCRFromFile(instance v1.Object) (*routev1.Route, error) { + route, err := util.GetRouteFromFile(m.RouteFile) + if err != nil { + log.Error(err, fmt.Sprintf("Error reading route configuration file: %s", m.RouteFile)) + return nil, err + } + + route.Name = m.GetName(instance) + route.Namespace = instance.GetNamespace() + route.Labels = m.LabelsFunc(instance) + + return m.BasedOnCR(instance, route) +} + +func (m *Manager) BasedOnCR(instance v1.Object, route *routev1.Route) (*routev1.Route, error) { + if m.OverrideFunc != nil { + err := m.OverrideFunc(instance, route, resources.Create) + if err != 
nil { + return nil, errors.Wrap(err, "failed during route override") + } + } + + return route, nil +} + +func (m *Manager) Get(instance v1.Object) (client.Object, error) { + if instance == nil { + return nil, nil // Instance has not been reconciled yet + } + + name := m.GetName(instance) + route := &routev1.Route{} + err := m.Client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: instance.GetNamespace()}, route) + if err != nil { + return nil, err + } + + return route, nil +} + +func (m *Manager) Exists(instance v1.Object) bool { + _, err := m.Get(instance) + if err != nil { + return false + } + + return true +} + +func (m *Manager) Delete(instance v1.Object) error { + route, err := m.Get(instance) + if err != nil { + if !k8serrors.IsNotFound(err) { + return err + } + } + + if route == nil { + return nil + } + + err = m.Client.Delete(context.TODO(), route) + if err != nil { + if !k8serrors.IsNotFound(err) { + return err + } + } + + return nil +} + +func (m *Manager) CheckState(instance v1.Object) error { + // NO-OP + return nil +} + +func (m *Manager) RestoreState(instance v1.Object) error { + // NO-OP + return nil +} + +func (m *Manager) SetCustomName(name string) { + // NO-OP +} + +func GetName(instance v1.Object, suffix ...string) string { + if len(suffix) != 0 { + if suffix[0] != "" { + return fmt.Sprintf("%s-%s", instance.GetName(), suffix[0]) + } + } + return fmt.Sprintf("%s", instance.GetName()) +} diff --git a/pkg/manager/resources/route/manager_test.go b/pkg/manager/resources/route/manager_test.go new file mode 100644 index 00000000..e528937a --- /dev/null +++ b/pkg/manager/resources/route/manager_test.go @@ -0,0 +1,106 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package route_test + +import ( + "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/route" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + routev1 "github.com/openshift/api/route/v1" + "github.com/pkg/errors" + k8serror "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var _ = Describe("Route manager", func() { + var ( + mockKubeClient *mocks.Client + manager *route.Manager + instance metav1.Object + ) + + BeforeEach(func() { + mockKubeClient = &mocks.Client{} + manager = &route.Manager{ + RouteFile: "../../../../definitions/ca/route.yaml", + Client: mockKubeClient, + OverrideFunc: func(v1.Object, *routev1.Route, resources.Action) error { + return nil + }, + LabelsFunc: func(v1.Object) map[string]string { + return map[string]string{} + }, + } + + instance = &metav1.ObjectMeta{} + }) + + Context("reconciles the route instance", func() { + It("does not try to create route if the get request returns an error other than 'not found'", func() { + errMsg := "connection refused" + mockKubeClient.GetReturns(errors.New(errMsg)) + err := manager.Reconcile(instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(errMsg)) + }) + + When("route does not exist", func() { + BeforeEach(func() { + notFoundErr := &k8serror.StatusError{ + ErrStatus: metav1.Status{ + Reason: metav1.StatusReasonNotFound, + }, + } + mockKubeClient.GetReturns(notFoundErr) + }) + + It("returns an error if fails to load default config", func() { + manager.RouteFile = "bad.yaml" + err := manager.Reconcile(instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("no such file or directory")) + }) + + It("returns an error if override route value fails", func() { + manager.OverrideFunc = func(v1.Object, *routev1.Route, resources.Action) error { + return errors.New("creation override failed") + } + err := manager.Reconcile(instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("creation override failed")) + }) + + It("returns an error if the creation of the Route fails", func() { + errMsg := "unable to create route" + mockKubeClient.CreateReturns(errors.New(errMsg)) + err := manager.Reconcile(instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(errMsg)) + }) + + It("does not return an error on a successfull route creation", func() { + err := manager.Reconcile(instance, false) + Expect(err).NotTo(HaveOccurred()) + }) + }) + }) +}) diff --git a/pkg/manager/resources/route/route_suite_test.go b/pkg/manager/resources/route/route_suite_test.go new file mode 100644 index 00000000..78be1555 --- /dev/null +++ b/pkg/manager/resources/route/route_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package route_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +func TestConfigmap(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Configmap Suite") +} diff --git a/pkg/manager/resources/service/manager.go b/pkg/manager/resources/service/manager.go new file mode 100644 index 00000000..bbc9ce70 --- /dev/null +++ b/pkg/manager/resources/service/manager.go @@ -0,0 +1,182 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package service + +import ( + "context" + "fmt" + + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/operatorerrors" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var log = logf.Log.WithName("service_manager") + +type Manager struct { + Client k8sclient.Client + Scheme *runtime.Scheme + ServiceFile string + Name string + CustomName string + + LabelsFunc func(v1.Object) map[string]string + OverrideFunc func(v1.Object, *corev1.Service, resources.Action) error +} + +func (m *Manager) GetName(instance v1.Object) string { + if m.CustomName != "" { + return m.CustomName + } + return GetName(instance.GetName(), m.Name) +} + +func (m *Manager) Reconcile(instance v1.Object, update bool) error { + name := m.GetName(instance) + + err := m.Client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: instance.GetNamespace()}, &corev1.Service{}) + if err != nil { + if k8serrors.IsNotFound(err) { + log.Info(fmt.Sprintf("Creating service '%s'", name)) + service, err := m.GetServiceBasedOnCRFromFile(instance) + if err != nil { + return err + } + + err = m.Client.Create(context.TODO(), service, k8sclient.CreateOption{Owner: instance, Scheme: m.Scheme}) + if err != nil { + return err + } + return nil + } + return err + } + + // TODO: If needed, update logic for servie goes here + + return nil +} + +func (m *Manager) GetServiceBasedOnCRFromFile(instance v1.Object) (*corev1.Service, error) { + service, err := util.GetServiceFromFile(m.ServiceFile) + if err != nil { + log.Error(err, fmt.Sprintf("Error reading service configuration file: %s", m.ServiceFile)) + return nil, err + } + + service.Name = m.GetName(instance) + service.Namespace = instance.GetNamespace() + service.Labels = m.LabelsFunc(instance) + service.Spec.Selector = m.getSelectorLabels(instance) + + return m.BasedOnCR(instance, service) +} + +func (m *Manager) BasedOnCR(instance v1.Object, service *corev1.Service) (*corev1.Service, error) { + if m.OverrideFunc != nil { + err := m.OverrideFunc(instance, service, resources.Create) + if err != nil { + return nil, 
operatorerrors.New(operatorerrors.InvalidServiceCreateRequest, err.Error()) + } + } + + return service, nil +} + +func (m *Manager) getSelectorLabels(instance v1.Object) map[string]string { + labels := m.LabelsFunc(instance) + return labels +} + +func (m *Manager) Get(instance v1.Object) (client.Object, error) { + if instance == nil { + return nil, nil // Instance has not been reconciled yet + } + + name := m.GetName(instance) + service := &corev1.Service{} + err := m.Client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: instance.GetNamespace()}, service) + if err != nil { + return nil, err + } + + return service, nil +} + +func (m *Manager) Exists(instance v1.Object) bool { + _, err := m.Get(instance) + if err != nil { + return false + } + + return true +} + +func (m *Manager) Delete(instance v1.Object) error { + service, err := m.Get(instance) + if err != nil { + if !k8serrors.IsNotFound(err) { + return err + } + } + + if service == nil { + return nil + } + + err = m.Client.Delete(context.TODO(), service) + if err != nil { + if !k8serrors.IsNotFound(err) { + return err + } + } + + return nil +} + +func (m *Manager) CheckState(instance v1.Object) error { + // NO-OP + return nil +} + +func (m *Manager) RestoreState(instance v1.Object) error { + // NO-OP + return nil +} + +func (m *Manager) SetCustomName(name string) { + m.CustomName = name +} + +func GetName(instanceName string, suffix ...string) string { + if len(suffix) != 0 { + if suffix[0] != "" { + return fmt.Sprintf("%s%s", instanceName, suffix[0]) + } + } + return fmt.Sprintf("%s", instanceName) +} diff --git a/pkg/manager/resources/service/manager_test.go b/pkg/manager/resources/service/manager_test.go new file mode 100644 index 00000000..b530033f --- /dev/null +++ b/pkg/manager/resources/service/manager_test.go @@ -0,0 +1,106 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package service_test + +import ( + "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/service" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + k8serror "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var _ = Describe("Service manager", func() { + var ( + mockKubeClient *mocks.Client + manager *service.Manager + instance metav1.Object + ) + + BeforeEach(func() { + mockKubeClient = &mocks.Client{} + manager = &service.Manager{ + ServiceFile: "../../../../definitions/ca/service.yaml", + Client: mockKubeClient, + OverrideFunc: func(v1.Object, *corev1.Service, resources.Action) error { + return nil + }, + LabelsFunc: func(v1.Object) map[string]string { + return map[string]string{} + }, + } + + instance = &metav1.ObjectMeta{} + }) + + Context("reconciles the service instance", func() { + It("does not try to create service if the get request returns an error other than 'not found'", func() { + errMsg := "connection refused" + mockKubeClient.GetReturns(errors.New(errMsg)) + err := manager.Reconcile(instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(errMsg)) + }) + + When("service does not exist", func() { + BeforeEach(func() { + notFoundErr := &k8serror.StatusError{ + ErrStatus: metav1.Status{ + Reason: metav1.StatusReasonNotFound, + }, + } + mockKubeClient.GetReturns(notFoundErr) + }) + + It("returns an error if fails to load default config", func() { + manager.ServiceFile = "bad.yaml" + err := manager.Reconcile(instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("no such file or directory")) + }) + + It("returns an error if override service value fails", func() { + manager.OverrideFunc = func(v1.Object, *corev1.Service, resources.Action) error { + return errors.New("creation override failed") + } + err := manager.Reconcile(instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("creation override failed")) + }) + + It("returns an error if the creation of the Service fails", func() { + errMsg := "unable to create service" + mockKubeClient.CreateReturns(errors.New(errMsg)) + err := manager.Reconcile(instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(errMsg)) + }) + + It("does not return an error on a successfull Service creation", func() { + err := manager.Reconcile(instance, false) + Expect(err).NotTo(HaveOccurred()) + }) + }) + }) +}) diff --git a/pkg/manager/resources/service/service_suite_test.go b/pkg/manager/resources/service/service_suite_test.go new file mode 100644 index 00000000..34738308 --- /dev/null +++ b/pkg/manager/resources/service/service_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package service_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +func TestService(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Service Suite") +} diff --git a/pkg/manager/resources/serviceaccount/manager.go b/pkg/manager/resources/serviceaccount/manager.go new file mode 100644 index 00000000..11a19954 --- /dev/null +++ b/pkg/manager/resources/serviceaccount/manager.go @@ -0,0 +1,172 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package serviceaccount + +import ( + "context" + "fmt" + + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/operatorerrors" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var log = logf.Log.WithName("serviceaccount_manager") + +type Manager struct { + Client k8sclient.Client + Scheme *runtime.Scheme + ServiceAccountFile string + Name string + + LabelsFunc func(v1.Object) map[string]string + OverrideFunc func(v1.Object, *corev1.ServiceAccount, resources.Action) error +} + +func (m *Manager) GetName(instance v1.Object) string { + return GetName(instance.GetName(), m.Name) +} + +func (m *Manager) Reconcile(instance v1.Object, update bool) error { + name := m.GetName(instance) + err := m.Client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: instance.GetNamespace()}, &corev1.ServiceAccount{}) + if err != nil { + if k8serrors.IsNotFound(err) { + log.Info(fmt.Sprintf("Creating service account '%s'", name)) + serviceAccount, err := m.GetServiceAccountBasedOnCRFromFile(instance) + if err != nil { + return err + } + + err = m.Client.Create(context.TODO(), serviceAccount, k8sclient.CreateOption{Owner: instance, Scheme: m.Scheme}) + if err != nil { + return err + } + return nil + } + return err + } + + // TODO: If needed, update logic for servie goes here + + return nil +} + +func (m *Manager) GetServiceAccountBasedOnCRFromFile(instance v1.Object) (*corev1.ServiceAccount, error) { + serviceAccount, err := util.GetServiceAccountFromFile(m.ServiceAccountFile) + if err != nil { + log.Error(err, fmt.Sprintf("Error reading service account configuration file: %s", m.ServiceAccountFile)) + return nil, err + } + + name := m.GetName(instance) + serviceAccount.Name = name + serviceAccount.Namespace = instance.GetNamespace() + serviceAccount.Labels = m.LabelsFunc(instance) + + return m.BasedOnCR(instance, serviceAccount) +} + +func (m *Manager) BasedOnCR(instance v1.Object, serviceAccount *corev1.ServiceAccount) (*corev1.ServiceAccount, error) { + if m.OverrideFunc != nil { + err := m.OverrideFunc(instance, 
serviceAccount, resources.Create) + if err != nil { + return nil, operatorerrors.New(operatorerrors.InvalidServiceAccountCreateRequest, err.Error()) + } + } + + return serviceAccount, nil +} + +func (m *Manager) Get(instance v1.Object) (client.Object, error) { + if instance == nil { + return nil, nil // Instance has not been reconciled yet + } + + name := m.GetName(instance) + sa := &corev1.ServiceAccount{} + err := m.Client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: instance.GetNamespace()}, sa) + if err != nil { + return nil, err + } + + return sa, nil +} + +func (m *Manager) Exists(instance v1.Object) bool { + _, err := m.Get(instance) + if err != nil { + return false + } + + return true +} + +func (m *Manager) Delete(instance v1.Object) error { + sa, err := m.Get(instance) + if err != nil { + if !k8serrors.IsNotFound(err) { + return err + } + } + + if sa == nil { + return nil + } + + err = m.Client.Delete(context.TODO(), sa) + if err != nil { + if !k8serrors.IsNotFound(err) { + return err + } + } + + return nil +} + +func (m *Manager) CheckState(instance v1.Object) error { + // NO-OP + return nil +} + +func (m *Manager) RestoreState(instance v1.Object) error { + // NO-OP + return nil +} + +func (m *Manager) SetCustomName(name string) { + // NO-OP +} + +func GetName(instanceName string, suffix ...string) string { + if len(suffix) != 0 { + if suffix[0] != "" { + return fmt.Sprintf("%s-%s", instanceName, suffix[0]) + } + } + return fmt.Sprintf("%s", instanceName) +} diff --git a/pkg/manager/resources/serviceaccount/manager_test.go b/pkg/manager/resources/serviceaccount/manager_test.go new file mode 100644 index 00000000..5936a605 --- /dev/null +++ b/pkg/manager/resources/serviceaccount/manager_test.go @@ -0,0 +1,106 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package serviceaccount_test + +import ( + "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/serviceaccount" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + k8serror "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var _ = Describe("Service Account manager", func() { + var ( + mockKubeClient *mocks.Client + manager *serviceaccount.Manager + instance metav1.Object + ) + + BeforeEach(func() { + mockKubeClient = &mocks.Client{} + manager = &serviceaccount.Manager{ + ServiceAccountFile: "../../../../definitions/ca/serviceaccount.yaml", + Client: mockKubeClient, + OverrideFunc: func(v1.Object, *corev1.ServiceAccount, resources.Action) error { + return nil + }, + LabelsFunc: func(v1.Object) map[string]string { + return map[string]string{} + }, + } + + instance = &metav1.ObjectMeta{} + }) + + Context("reconciles the instance", func() { + It("does not try to create service account if the get request returns an error other than 'not found'", func() { + errMsg := "connection refused" + mockKubeClient.GetReturns(errors.New(errMsg)) + err := manager.Reconcile(instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(errMsg)) + }) + + When("service account does not exist", func() { + BeforeEach(func() { + notFoundErr := &k8serror.StatusError{ + ErrStatus: metav1.Status{ + Reason: metav1.StatusReasonNotFound, + }, + } + mockKubeClient.GetReturns(notFoundErr) + }) + + It("returns an error if fails to load default config", func() { + manager.ServiceAccountFile = "bad.yaml" + err := manager.Reconcile(instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("no such file or directory")) + }) + + It("returns an error if override service account value fails", func() { + manager.OverrideFunc = func(v1.Object, *corev1.ServiceAccount, resources.Action) error { + return errors.New("creation override failed") + } + err := manager.Reconcile(instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("creation override failed")) + }) + + It("returns an error if the creation of the service account fails", func() { + errMsg := "unable to create service account" + mockKubeClient.CreateReturns(errors.New(errMsg)) + err := manager.Reconcile(instance, false) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(errMsg)) + }) + + It("does not return an error on a successfull role creation", func() { + err := manager.Reconcile(instance, false) + Expect(err).NotTo(HaveOccurred()) + }) + }) + }) +}) diff --git a/pkg/manager/resources/serviceaccount/serviceaccount_suite_test.go b/pkg/manager/resources/serviceaccount/serviceaccount_suite_test.go new file mode 100644 index 00000000..cd508d2f --- /dev/null +++ b/pkg/manager/resources/serviceaccount/serviceaccount_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package serviceaccount_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestServiceaccount(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Serviceaccount Suite") +} diff --git a/pkg/migrator/initsecret/migrator.go b/pkg/migrator/initsecret/migrator.go new file mode 100644 index 00000000..66eee03d --- /dev/null +++ b/pkg/migrator/initsecret/migrator.go @@ -0,0 +1,113 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package initsecret + +import ( + "errors" + + commonconfig "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" +) + +type Secret struct { + Component *MSP `json:"component,omitempty"` + TLS *MSP `json:"tls,omitempty"` +} + +type MSP struct { + Keystore []string `json:"keystore,omitempty"` + SignCerts []string `json:"signcerts,omitempty"` + CACerts []string `json:"cacerts,omitempty"` + IntermediateCerts []string `json:"intermediatecerts,omitempty"` + AdminCerts []string `json:"admincerts,omitempty"` +} + +type Migrator struct { + Secret *Secret +} + +func (m *Migrator) ParseComponentCrypto() (*commonconfig.Response, error) { + crypto := m.Secret.Component + if crypto == nil { + return nil, errors.New("init secret missing component crypto") + } + return m.ParseCrypto(crypto) +} + +func (m *Migrator) ParseTLSCrypto() (*commonconfig.Response, error) { + crypto := m.Secret.TLS + if crypto == nil { + return nil, errors.New("init secret missing TLS crypto") + } + return m.ParseCrypto(crypto) +} + +func (m *Migrator) ParseCrypto(crypto *MSP) (*commonconfig.Response, error) { + signcert := crypto.SignCerts[0] // When would there ever be more then 1 signed cert? Assuming only one as of right now. 
However, the MSP secret json has this defined as an array + keystore := crypto.Keystore[0] + + signcertBytes, err := util.Base64ToBytes(signcert) + if err != nil { + return nil, err + } + + keystoreBytes, err := util.Base64ToBytes(keystore) + if err != nil { + return nil, err + } + + adminCerts := [][]byte{} + for _, cert := range crypto.AdminCerts { + certBytes, err := util.Base64ToBytes(cert) + if err != nil { + return nil, err + } + + adminCerts = append(adminCerts, certBytes) + } + + caCerts := [][]byte{} + for _, cert := range crypto.CACerts { + certBytes, err := util.Base64ToBytes(cert) + if err != nil { + return nil, err + } + + caCerts = append(caCerts, certBytes) + } + + interCerts := [][]byte{} + for _, cert := range crypto.IntermediateCerts { + certBytes, err := util.Base64ToBytes(cert) + if err != nil { + return nil, err + } + + interCerts = append(interCerts, certBytes) + } + + return &commonconfig.Response{ + SignCert: signcertBytes, + Keystore: keystoreBytes, + CACerts: caCerts, + AdminCerts: adminCerts, + IntermediateCerts: interCerts, + }, nil + +} diff --git a/pkg/migrator/migrator.go b/pkg/migrator/migrator.go new file mode 100644 index 00000000..999ddeb5 --- /dev/null +++ b/pkg/migrator/migrator.go @@ -0,0 +1,55 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package migrator + +import ( + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + "github.com/IBM-Blockchain/fabric-operator/pkg/global" + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/manager" +) + +var log = logf.Log + +type Migrator struct { + Client k8sclient.Client + Reader client.Reader + Config *config.Config + Namespace string +} + +func New(mgr manager.Manager, cfg *config.Config, namespace string) *Migrator { + client := k8sclient.New(mgr.GetClient(), &global.ConfigSetter{}) + reader := mgr.GetAPIReader() + return &Migrator{ + Client: client, + Reader: reader, + Config: cfg, + Namespace: namespace, + } +} + +func (m *Migrator) Migrate() error { + + // No-op + + return nil +} diff --git a/pkg/migrator/peer/fabric/fabric_suite_test.go b/pkg/migrator/peer/fabric/fabric_suite_test.go new file mode 100644 index 00000000..2384c88a --- /dev/null +++ b/pkg/migrator/peer/fabric/fabric_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
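[Editorial note] To make the expected shape of the init secret concrete, the sketch below feeds a minimal JSON document through the Secret and Migrator types defined above. It is illustrative only: the certificate value is a placeholder, and it assumes util.Base64ToBytes performs standard base64 decoding (that helper is not shown in this patch).

package example

import (
	"encoding/base64"
	"encoding/json"
	"fmt"

	"github.com/IBM-Blockchain/fabric-operator/pkg/migrator/initsecret"
)

// parseExample unmarshals a minimal component MSP block and decodes it via
// ParseComponentCrypto. The field names follow the json tags on the MSP struct.
func parseExample() error {
	cert := base64.StdEncoding.EncodeToString([]byte("-----BEGIN CERTIFICATE-----\nplaceholder\n-----END CERTIFICATE-----"))
	raw := fmt.Sprintf(`{"component":{"keystore":["%[1]s"],"signcerts":["%[1]s"],"cacerts":["%[1]s"]}}`, cert)

	secret := &initsecret.Secret{}
	if err := json.Unmarshal([]byte(raw), secret); err != nil {
		return err
	}

	m := &initsecret.Migrator{Secret: secret}
	resp, err := m.ParseComponentCrypto()
	if err != nil {
		return err
	}
	fmt.Printf("decoded %d CA cert(s)\n", len(resp.CACerts))
	return nil
}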
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package fabric_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestFabric(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Fabric Suite") +} diff --git a/pkg/migrator/peer/fabric/migrator.go b/pkg/migrator/peer/fabric/migrator.go new file mode 100644 index 00000000..3d92eb4a --- /dev/null +++ b/pkg/migrator/peer/fabric/migrator.go @@ -0,0 +1,72 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package fabric + +import ( + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var log = logf.Log.WithName("peer_fabric_migrator") + +type Version string + +const ( + V2 Version = "V2" +) + +//go:generate counterfeiter -o mocks/migrator.go -fake-name Migrator . 
Migrator +type Migrator interface { + MigrationNeeded(metav1.Object) bool + UpgradeDBs(metav1.Object, config.DBMigrationTimeouts) error + UpdateConfig(metav1.Object, string) error + SetChaincodeLauncherResourceOnCR(metav1.Object) error +} + +func V2Migrate(instance metav1.Object, migrator Migrator, version string, timeouts config.DBMigrationTimeouts) error { + if !migrator.MigrationNeeded(instance) { + log.Info("Migration not needed, skipping migration") + return nil + } + log.Info("Migration is needed, starting migration") + + if err := migrator.SetChaincodeLauncherResourceOnCR(instance); err != nil { + return errors.Wrap(err, "failed to update chaincode launcher resources on CR") + } + + if err := migrator.UpdateConfig(instance, version); err != nil { + return errors.Wrap(err, "failed to update config") + } + + if err := migrator.UpgradeDBs(instance, timeouts); err != nil { + return errors.Wrap(err, "failed to upgrade peer's dbs") + } + + return nil +} + +func V24Migrate(instance metav1.Object, migrator Migrator, version string, timeouts config.DBMigrationTimeouts) error { + if err := migrator.UpdateConfig(instance, version); err != nil { + return errors.Wrap(err, "failed to update v2.4.1 configs") + } + return nil +} diff --git a/pkg/migrator/peer/fabric/migrator_test.go b/pkg/migrator/peer/fabric/migrator_test.go new file mode 100644 index 00000000..9d403e01 --- /dev/null +++ b/pkg/migrator/peer/fabric/migrator_test.go @@ -0,0 +1,86 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package fabric_test + +import ( + "errors" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + "github.com/IBM-Blockchain/fabric-operator/pkg/migrator/peer/fabric" + "github.com/IBM-Blockchain/fabric-operator/pkg/migrator/peer/fabric/mocks" +) + +var _ = Describe("Peer migrator", func() { + var ( + migrator *mocks.Migrator + instance *current.IBPPeer + ) + const FABRIC_V2 = "2.2.5-1" + + BeforeEach(func() { + migrator = &mocks.Migrator{} + migrator.MigrationNeededReturns(true) + + instance = ¤t.IBPPeer{} + }) + + Context("migrate to version", func() { + Context("V2", func() { + It("returns error on failure", func() { + migrator.UpgradeDBsReturns(errors.New("failed to reset peer")) + err := fabric.V2Migrate(instance, migrator, FABRIC_V2, config.DBMigrationTimeouts{}) + Expect(err).To(HaveOccurred()) + Expect(err).Should(MatchError(ContainSubstring("failed to reset peer"))) + }) + + It("migrates", func() { + err := fabric.V2Migrate(instance, migrator, FABRIC_V2, config.DBMigrationTimeouts{}) + Expect(err).NotTo(HaveOccurred()) + }) + }) + }) + + Context("V2 migration", func() { + It("returns immediately when migration not needed", func() { + migrator.MigrationNeededReturns(false) + err := fabric.V2Migrate(instance, migrator, FABRIC_V2, config.DBMigrationTimeouts{}) + Expect(err).NotTo(HaveOccurred()) + Expect(migrator.UpdateConfigCallCount()).To(Equal(0)) + Expect(migrator.UpgradeDBsCallCount()).To(Equal(0)) + }) + + It("returns an error if unable to update config", func() { + migrator.UpdateConfigReturns(errors.New("failed to update config")) + err := fabric.V2Migrate(instance, migrator, FABRIC_V2, config.DBMigrationTimeouts{}) + Expect(err).To(HaveOccurred()) + Expect(err).Should(MatchError(ContainSubstring("failed to update config"))) + }) + + It("returns an error if unable to reset peer", func() { + migrator.UpgradeDBsReturns(errors.New("failed to reset peer")) + err := fabric.V2Migrate(instance, migrator, FABRIC_V2, config.DBMigrationTimeouts{}) + Expect(err).To(HaveOccurred()) + Expect(err).Should(MatchError(ContainSubstring("failed to reset peer"))) + }) + }) +}) diff --git a/pkg/migrator/peer/fabric/mocks/migrator.go b/pkg/migrator/peer/fabric/mocks/migrator.go new file mode 100644 index 00000000..40c6f877 --- /dev/null +++ b/pkg/migrator/peer/fabric/mocks/migrator.go @@ -0,0 +1,339 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package mocks + +import ( + "sync" + + "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + "github.com/IBM-Blockchain/fabric-operator/pkg/migrator/peer/fabric" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type Migrator struct { + MigrationNeededStub func(v1.Object) bool + migrationNeededMutex sync.RWMutex + migrationNeededArgsForCall []struct { + arg1 v1.Object + } + migrationNeededReturns struct { + result1 bool + } + migrationNeededReturnsOnCall map[int]struct { + result1 bool + } + SetChaincodeLauncherResourceOnCRStub func(v1.Object) error + setChaincodeLauncherResourceOnCRMutex sync.RWMutex + setChaincodeLauncherResourceOnCRArgsForCall []struct { + arg1 v1.Object + } + setChaincodeLauncherResourceOnCRReturns struct { + result1 error + } + setChaincodeLauncherResourceOnCRReturnsOnCall map[int]struct { + result1 error + } + UpdateConfigStub func(v1.Object, string) error + updateConfigMutex sync.RWMutex + updateConfigArgsForCall []struct { + arg1 v1.Object + arg2 string + } + updateConfigReturns struct { + result1 error + } + updateConfigReturnsOnCall map[int]struct { + result1 error + } + UpgradeDBsStub func(v1.Object, operatorconfig.DBMigrationTimeouts) error + upgradeDBsMutex sync.RWMutex + upgradeDBsArgsForCall []struct { + arg1 v1.Object + arg2 operatorconfig.DBMigrationTimeouts + } + upgradeDBsReturns struct { + result1 error + } + upgradeDBsReturnsOnCall map[int]struct { + result1 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *Migrator) MigrationNeeded(arg1 v1.Object) bool { + fake.migrationNeededMutex.Lock() + ret, specificReturn := fake.migrationNeededReturnsOnCall[len(fake.migrationNeededArgsForCall)] + fake.migrationNeededArgsForCall = append(fake.migrationNeededArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.MigrationNeededStub + fakeReturns := fake.migrationNeededReturns + fake.recordInvocation("MigrationNeeded", []interface{}{arg1}) + fake.migrationNeededMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Migrator) MigrationNeededCallCount() int { + fake.migrationNeededMutex.RLock() + defer fake.migrationNeededMutex.RUnlock() + return len(fake.migrationNeededArgsForCall) +} + +func (fake *Migrator) MigrationNeededCalls(stub func(v1.Object) bool) { + fake.migrationNeededMutex.Lock() + defer fake.migrationNeededMutex.Unlock() + fake.MigrationNeededStub = stub +} + +func (fake *Migrator) MigrationNeededArgsForCall(i int) v1.Object { + fake.migrationNeededMutex.RLock() + defer fake.migrationNeededMutex.RUnlock() + argsForCall := fake.migrationNeededArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Migrator) MigrationNeededReturns(result1 bool) { + fake.migrationNeededMutex.Lock() + defer fake.migrationNeededMutex.Unlock() + fake.MigrationNeededStub = nil + fake.migrationNeededReturns = struct { + result1 bool + }{result1} +} + +func (fake *Migrator) MigrationNeededReturnsOnCall(i int, result1 bool) { + fake.migrationNeededMutex.Lock() + defer fake.migrationNeededMutex.Unlock() + fake.MigrationNeededStub = nil + if fake.migrationNeededReturnsOnCall == nil { + fake.migrationNeededReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.migrationNeededReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Migrator) SetChaincodeLauncherResourceOnCR(arg1 v1.Object) error { + fake.setChaincodeLauncherResourceOnCRMutex.Lock() + ret, specificReturn := 
fake.setChaincodeLauncherResourceOnCRReturnsOnCall[len(fake.setChaincodeLauncherResourceOnCRArgsForCall)] + fake.setChaincodeLauncherResourceOnCRArgsForCall = append(fake.setChaincodeLauncherResourceOnCRArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.SetChaincodeLauncherResourceOnCRStub + fakeReturns := fake.setChaincodeLauncherResourceOnCRReturns + fake.recordInvocation("SetChaincodeLauncherResourceOnCR", []interface{}{arg1}) + fake.setChaincodeLauncherResourceOnCRMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Migrator) SetChaincodeLauncherResourceOnCRCallCount() int { + fake.setChaincodeLauncherResourceOnCRMutex.RLock() + defer fake.setChaincodeLauncherResourceOnCRMutex.RUnlock() + return len(fake.setChaincodeLauncherResourceOnCRArgsForCall) +} + +func (fake *Migrator) SetChaincodeLauncherResourceOnCRCalls(stub func(v1.Object) error) { + fake.setChaincodeLauncherResourceOnCRMutex.Lock() + defer fake.setChaincodeLauncherResourceOnCRMutex.Unlock() + fake.SetChaincodeLauncherResourceOnCRStub = stub +} + +func (fake *Migrator) SetChaincodeLauncherResourceOnCRArgsForCall(i int) v1.Object { + fake.setChaincodeLauncherResourceOnCRMutex.RLock() + defer fake.setChaincodeLauncherResourceOnCRMutex.RUnlock() + argsForCall := fake.setChaincodeLauncherResourceOnCRArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Migrator) SetChaincodeLauncherResourceOnCRReturns(result1 error) { + fake.setChaincodeLauncherResourceOnCRMutex.Lock() + defer fake.setChaincodeLauncherResourceOnCRMutex.Unlock() + fake.SetChaincodeLauncherResourceOnCRStub = nil + fake.setChaincodeLauncherResourceOnCRReturns = struct { + result1 error + }{result1} +} + +func (fake *Migrator) SetChaincodeLauncherResourceOnCRReturnsOnCall(i int, result1 error) { + fake.setChaincodeLauncherResourceOnCRMutex.Lock() + defer fake.setChaincodeLauncherResourceOnCRMutex.Unlock() + fake.SetChaincodeLauncherResourceOnCRStub = nil + if fake.setChaincodeLauncherResourceOnCRReturnsOnCall == nil { + fake.setChaincodeLauncherResourceOnCRReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.setChaincodeLauncherResourceOnCRReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Migrator) UpdateConfig(arg1 v1.Object, arg2 string) error { + fake.updateConfigMutex.Lock() + ret, specificReturn := fake.updateConfigReturnsOnCall[len(fake.updateConfigArgsForCall)] + fake.updateConfigArgsForCall = append(fake.updateConfigArgsForCall, struct { + arg1 v1.Object + arg2 string + }{arg1, arg2}) + stub := fake.UpdateConfigStub + fakeReturns := fake.updateConfigReturns + fake.recordInvocation("UpdateConfig", []interface{}{arg1, arg2}) + fake.updateConfigMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Migrator) UpdateConfigCallCount() int { + fake.updateConfigMutex.RLock() + defer fake.updateConfigMutex.RUnlock() + return len(fake.updateConfigArgsForCall) +} + +func (fake *Migrator) UpdateConfigCalls(stub func(v1.Object, string) error) { + fake.updateConfigMutex.Lock() + defer fake.updateConfigMutex.Unlock() + fake.UpdateConfigStub = stub +} + +func (fake *Migrator) UpdateConfigArgsForCall(i int) (v1.Object, string) { + fake.updateConfigMutex.RLock() + defer fake.updateConfigMutex.RUnlock() + argsForCall := fake.updateConfigArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake 
*Migrator) UpdateConfigReturns(result1 error) { + fake.updateConfigMutex.Lock() + defer fake.updateConfigMutex.Unlock() + fake.UpdateConfigStub = nil + fake.updateConfigReturns = struct { + result1 error + }{result1} +} + +func (fake *Migrator) UpdateConfigReturnsOnCall(i int, result1 error) { + fake.updateConfigMutex.Lock() + defer fake.updateConfigMutex.Unlock() + fake.UpdateConfigStub = nil + if fake.updateConfigReturnsOnCall == nil { + fake.updateConfigReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.updateConfigReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Migrator) UpgradeDBs(arg1 v1.Object, arg2 operatorconfig.DBMigrationTimeouts) error { + fake.upgradeDBsMutex.Lock() + ret, specificReturn := fake.upgradeDBsReturnsOnCall[len(fake.upgradeDBsArgsForCall)] + fake.upgradeDBsArgsForCall = append(fake.upgradeDBsArgsForCall, struct { + arg1 v1.Object + arg2 operatorconfig.DBMigrationTimeouts + }{arg1, arg2}) + stub := fake.UpgradeDBsStub + fakeReturns := fake.upgradeDBsReturns + fake.recordInvocation("UpgradeDBs", []interface{}{arg1, arg2}) + fake.upgradeDBsMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Migrator) UpgradeDBsCallCount() int { + fake.upgradeDBsMutex.RLock() + defer fake.upgradeDBsMutex.RUnlock() + return len(fake.upgradeDBsArgsForCall) +} + +func (fake *Migrator) UpgradeDBsCalls(stub func(v1.Object, operatorconfig.DBMigrationTimeouts) error) { + fake.upgradeDBsMutex.Lock() + defer fake.upgradeDBsMutex.Unlock() + fake.UpgradeDBsStub = stub +} + +func (fake *Migrator) UpgradeDBsArgsForCall(i int) (v1.Object, operatorconfig.DBMigrationTimeouts) { + fake.upgradeDBsMutex.RLock() + defer fake.upgradeDBsMutex.RUnlock() + argsForCall := fake.upgradeDBsArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *Migrator) UpgradeDBsReturns(result1 error) { + fake.upgradeDBsMutex.Lock() + defer fake.upgradeDBsMutex.Unlock() + fake.UpgradeDBsStub = nil + fake.upgradeDBsReturns = struct { + result1 error + }{result1} +} + +func (fake *Migrator) UpgradeDBsReturnsOnCall(i int, result1 error) { + fake.upgradeDBsMutex.Lock() + defer fake.upgradeDBsMutex.Unlock() + fake.UpgradeDBsStub = nil + if fake.upgradeDBsReturnsOnCall == nil { + fake.upgradeDBsReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.upgradeDBsReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Migrator) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.migrationNeededMutex.RLock() + defer fake.migrationNeededMutex.RUnlock() + fake.setChaincodeLauncherResourceOnCRMutex.RLock() + defer fake.setChaincodeLauncherResourceOnCRMutex.RUnlock() + fake.updateConfigMutex.RLock() + defer fake.updateConfigMutex.RUnlock() + fake.upgradeDBsMutex.RLock() + defer fake.upgradeDBsMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *Migrator) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ fabric.Migrator = 
new(Migrator) diff --git a/pkg/migrator/peer/fabric/v2/mocks/configmapmanager.go b/pkg/migrator/peer/fabric/v2/mocks/configmapmanager.go new file mode 100644 index 00000000..b13b1260 --- /dev/null +++ b/pkg/migrator/peer/fabric/v2/mocks/configmapmanager.go @@ -0,0 +1,195 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package mocks + +import ( + "sync" + + "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + initializer "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer" + v2 "github.com/IBM-Blockchain/fabric-operator/pkg/migrator/peer/fabric/v2" + v1 "k8s.io/api/core/v1" +) + +type ConfigMapManager struct { + CreateOrUpdateStub func(*v1beta1.IBPPeer, initializer.CoreConfig) error + createOrUpdateMutex sync.RWMutex + createOrUpdateArgsForCall []struct { + arg1 *v1beta1.IBPPeer + arg2 initializer.CoreConfig + } + createOrUpdateReturns struct { + result1 error + } + createOrUpdateReturnsOnCall map[int]struct { + result1 error + } + GetCoreConfigStub func(*v1beta1.IBPPeer) (*v1.ConfigMap, error) + getCoreConfigMutex sync.RWMutex + getCoreConfigArgsForCall []struct { + arg1 *v1beta1.IBPPeer + } + getCoreConfigReturns struct { + result1 *v1.ConfigMap + result2 error + } + getCoreConfigReturnsOnCall map[int]struct { + result1 *v1.ConfigMap + result2 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *ConfigMapManager) CreateOrUpdate(arg1 *v1beta1.IBPPeer, arg2 initializer.CoreConfig) error { + fake.createOrUpdateMutex.Lock() + ret, specificReturn := fake.createOrUpdateReturnsOnCall[len(fake.createOrUpdateArgsForCall)] + fake.createOrUpdateArgsForCall = append(fake.createOrUpdateArgsForCall, struct { + arg1 *v1beta1.IBPPeer + arg2 initializer.CoreConfig + }{arg1, arg2}) + stub := fake.CreateOrUpdateStub + fakeReturns := fake.createOrUpdateReturns + fake.recordInvocation("CreateOrUpdate", []interface{}{arg1, arg2}) + fake.createOrUpdateMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ConfigMapManager) CreateOrUpdateCallCount() int { + fake.createOrUpdateMutex.RLock() + defer fake.createOrUpdateMutex.RUnlock() + return len(fake.createOrUpdateArgsForCall) +} + +func (fake *ConfigMapManager) CreateOrUpdateCalls(stub func(*v1beta1.IBPPeer, initializer.CoreConfig) error) { + fake.createOrUpdateMutex.Lock() + defer fake.createOrUpdateMutex.Unlock() + fake.CreateOrUpdateStub = stub +} + +func (fake *ConfigMapManager) CreateOrUpdateArgsForCall(i int) (*v1beta1.IBPPeer, initializer.CoreConfig) { + fake.createOrUpdateMutex.RLock() + defer fake.createOrUpdateMutex.RUnlock() + argsForCall := fake.createOrUpdateArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *ConfigMapManager) CreateOrUpdateReturns(result1 error) { + fake.createOrUpdateMutex.Lock() + defer fake.createOrUpdateMutex.Unlock() + fake.CreateOrUpdateStub = nil + fake.createOrUpdateReturns = struct { + result1 error + }{result1} +} + +func (fake *ConfigMapManager) CreateOrUpdateReturnsOnCall(i int, result1 error) { + fake.createOrUpdateMutex.Lock() + defer fake.createOrUpdateMutex.Unlock() + fake.CreateOrUpdateStub = nil + if fake.createOrUpdateReturnsOnCall == nil { + fake.createOrUpdateReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.createOrUpdateReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *ConfigMapManager) GetCoreConfig(arg1 *v1beta1.IBPPeer) (*v1.ConfigMap, error) { + 
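+	// Counterfeiter dispatch: record the invocation and its argument under the lock, then
+	// answer from, in order of precedence, an explicit GetCoreConfigStub, a per-call value
+	// registered with GetCoreConfigReturnsOnCall, or the default set with GetCoreConfigReturns.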
fake.getCoreConfigMutex.Lock() + ret, specificReturn := fake.getCoreConfigReturnsOnCall[len(fake.getCoreConfigArgsForCall)] + fake.getCoreConfigArgsForCall = append(fake.getCoreConfigArgsForCall, struct { + arg1 *v1beta1.IBPPeer + }{arg1}) + stub := fake.GetCoreConfigStub + fakeReturns := fake.getCoreConfigReturns + fake.recordInvocation("GetCoreConfig", []interface{}{arg1}) + fake.getCoreConfigMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *ConfigMapManager) GetCoreConfigCallCount() int { + fake.getCoreConfigMutex.RLock() + defer fake.getCoreConfigMutex.RUnlock() + return len(fake.getCoreConfigArgsForCall) +} + +func (fake *ConfigMapManager) GetCoreConfigCalls(stub func(*v1beta1.IBPPeer) (*v1.ConfigMap, error)) { + fake.getCoreConfigMutex.Lock() + defer fake.getCoreConfigMutex.Unlock() + fake.GetCoreConfigStub = stub +} + +func (fake *ConfigMapManager) GetCoreConfigArgsForCall(i int) *v1beta1.IBPPeer { + fake.getCoreConfigMutex.RLock() + defer fake.getCoreConfigMutex.RUnlock() + argsForCall := fake.getCoreConfigArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *ConfigMapManager) GetCoreConfigReturns(result1 *v1.ConfigMap, result2 error) { + fake.getCoreConfigMutex.Lock() + defer fake.getCoreConfigMutex.Unlock() + fake.GetCoreConfigStub = nil + fake.getCoreConfigReturns = struct { + result1 *v1.ConfigMap + result2 error + }{result1, result2} +} + +func (fake *ConfigMapManager) GetCoreConfigReturnsOnCall(i int, result1 *v1.ConfigMap, result2 error) { + fake.getCoreConfigMutex.Lock() + defer fake.getCoreConfigMutex.Unlock() + fake.GetCoreConfigStub = nil + if fake.getCoreConfigReturnsOnCall == nil { + fake.getCoreConfigReturnsOnCall = make(map[int]struct { + result1 *v1.ConfigMap + result2 error + }) + } + fake.getCoreConfigReturnsOnCall[i] = struct { + result1 *v1.ConfigMap + result2 error + }{result1, result2} +} + +func (fake *ConfigMapManager) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.createOrUpdateMutex.RLock() + defer fake.createOrUpdateMutex.RUnlock() + fake.getCoreConfigMutex.RLock() + defer fake.getCoreConfigMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *ConfigMapManager) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ v2.ConfigMapManager = new(ConfigMapManager) diff --git a/pkg/migrator/peer/fabric/v2/mocks/deploymentmanager.go b/pkg/migrator/peer/fabric/v2/mocks/deploymentmanager.go new file mode 100644 index 00000000..5165f3e1 --- /dev/null +++ b/pkg/migrator/peer/fabric/v2/mocks/deploymentmanager.go @@ -0,0 +1,338 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
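+// Note for readers: the v2 peer migrator specs in peer_test.go drive v2.Migrate through this
+// fake together with the ConfigMapManager fake. A rough sketch of that wiring (illustrative
+// only, not part of the generated output; the fake/instance names are placeholders):
+//
+//	dm := &mocks.DeploymentManager{}
+//	dm.GetReturns(deploymentWithDindContainer, nil) // *appsv1.Deployment still carrying a "dind" container
+//	m := &v2.Migrate{DeploymentManager: dm, ConfigMapManager: cmFake, Client: clientFake}
+//	// With an un-migrated core.yaml returned by cmFake, m.MigrationNeeded(instance) reports
+//	// true; an error from dm.Get makes it report false.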
+package mocks + +import ( + "sync" + + v2 "github.com/IBM-Blockchain/fabric-operator/pkg/migrator/peer/fabric/v2" + v1a "k8s.io/api/apps/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type DeploymentManager struct { + DeleteStub func(v1.Object) error + deleteMutex sync.RWMutex + deleteArgsForCall []struct { + arg1 v1.Object + } + deleteReturns struct { + result1 error + } + deleteReturnsOnCall map[int]struct { + result1 error + } + DeploymentStatusStub func(v1.Object) (v1a.DeploymentStatus, error) + deploymentStatusMutex sync.RWMutex + deploymentStatusArgsForCall []struct { + arg1 v1.Object + } + deploymentStatusReturns struct { + result1 v1a.DeploymentStatus + result2 error + } + deploymentStatusReturnsOnCall map[int]struct { + result1 v1a.DeploymentStatus + result2 error + } + GetStub func(v1.Object) (client.Object, error) + getMutex sync.RWMutex + getArgsForCall []struct { + arg1 v1.Object + } + getReturns struct { + result1 client.Object + result2 error + } + getReturnsOnCall map[int]struct { + result1 client.Object + result2 error + } + GetSchemeStub func() *runtime.Scheme + getSchemeMutex sync.RWMutex + getSchemeArgsForCall []struct { + } + getSchemeReturns struct { + result1 *runtime.Scheme + } + getSchemeReturnsOnCall map[int]struct { + result1 *runtime.Scheme + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *DeploymentManager) Delete(arg1 v1.Object) error { + fake.deleteMutex.Lock() + ret, specificReturn := fake.deleteReturnsOnCall[len(fake.deleteArgsForCall)] + fake.deleteArgsForCall = append(fake.deleteArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.DeleteStub + fakeReturns := fake.deleteReturns + fake.recordInvocation("Delete", []interface{}{arg1}) + fake.deleteMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *DeploymentManager) DeleteCallCount() int { + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + return len(fake.deleteArgsForCall) +} + +func (fake *DeploymentManager) DeleteCalls(stub func(v1.Object) error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = stub +} + +func (fake *DeploymentManager) DeleteArgsForCall(i int) v1.Object { + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + argsForCall := fake.deleteArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *DeploymentManager) DeleteReturns(result1 error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = nil + fake.deleteReturns = struct { + result1 error + }{result1} +} + +func (fake *DeploymentManager) DeleteReturnsOnCall(i int, result1 error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = nil + if fake.deleteReturnsOnCall == nil { + fake.deleteReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.deleteReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *DeploymentManager) DeploymentStatus(arg1 v1.Object) (v1a.DeploymentStatus, error) { + fake.deploymentStatusMutex.Lock() + ret, specificReturn := fake.deploymentStatusReturnsOnCall[len(fake.deploymentStatusArgsForCall)] + fake.deploymentStatusArgsForCall = append(fake.deploymentStatusArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.DeploymentStatusStub + fakeReturns := fake.deploymentStatusReturns + 
fake.recordInvocation("DeploymentStatus", []interface{}{arg1}) + fake.deploymentStatusMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *DeploymentManager) DeploymentStatusCallCount() int { + fake.deploymentStatusMutex.RLock() + defer fake.deploymentStatusMutex.RUnlock() + return len(fake.deploymentStatusArgsForCall) +} + +func (fake *DeploymentManager) DeploymentStatusCalls(stub func(v1.Object) (v1a.DeploymentStatus, error)) { + fake.deploymentStatusMutex.Lock() + defer fake.deploymentStatusMutex.Unlock() + fake.DeploymentStatusStub = stub +} + +func (fake *DeploymentManager) DeploymentStatusArgsForCall(i int) v1.Object { + fake.deploymentStatusMutex.RLock() + defer fake.deploymentStatusMutex.RUnlock() + argsForCall := fake.deploymentStatusArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *DeploymentManager) DeploymentStatusReturns(result1 v1a.DeploymentStatus, result2 error) { + fake.deploymentStatusMutex.Lock() + defer fake.deploymentStatusMutex.Unlock() + fake.DeploymentStatusStub = nil + fake.deploymentStatusReturns = struct { + result1 v1a.DeploymentStatus + result2 error + }{result1, result2} +} + +func (fake *DeploymentManager) DeploymentStatusReturnsOnCall(i int, result1 v1a.DeploymentStatus, result2 error) { + fake.deploymentStatusMutex.Lock() + defer fake.deploymentStatusMutex.Unlock() + fake.DeploymentStatusStub = nil + if fake.deploymentStatusReturnsOnCall == nil { + fake.deploymentStatusReturnsOnCall = make(map[int]struct { + result1 v1a.DeploymentStatus + result2 error + }) + } + fake.deploymentStatusReturnsOnCall[i] = struct { + result1 v1a.DeploymentStatus + result2 error + }{result1, result2} +} + +func (fake *DeploymentManager) Get(arg1 v1.Object) (client.Object, error) { + fake.getMutex.Lock() + ret, specificReturn := fake.getReturnsOnCall[len(fake.getArgsForCall)] + fake.getArgsForCall = append(fake.getArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.GetStub + fakeReturns := fake.getReturns + fake.recordInvocation("Get", []interface{}{arg1}) + fake.getMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *DeploymentManager) GetCallCount() int { + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + return len(fake.getArgsForCall) +} + +func (fake *DeploymentManager) GetCalls(stub func(v1.Object) (client.Object, error)) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = stub +} + +func (fake *DeploymentManager) GetArgsForCall(i int) v1.Object { + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + argsForCall := fake.getArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *DeploymentManager) GetReturns(result1 client.Object, result2 error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = nil + fake.getReturns = struct { + result1 client.Object + result2 error + }{result1, result2} +} + +func (fake *DeploymentManager) GetReturnsOnCall(i int, result1 client.Object, result2 error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = nil + if fake.getReturnsOnCall == nil { + fake.getReturnsOnCall = make(map[int]struct { + result1 client.Object + result2 error + }) + } + fake.getReturnsOnCall[i] = struct { + result1 client.Object + result2 error + }{result1, result2} +} + +func (fake *DeploymentManager) 
GetScheme() *runtime.Scheme { + fake.getSchemeMutex.Lock() + ret, specificReturn := fake.getSchemeReturnsOnCall[len(fake.getSchemeArgsForCall)] + fake.getSchemeArgsForCall = append(fake.getSchemeArgsForCall, struct { + }{}) + stub := fake.GetSchemeStub + fakeReturns := fake.getSchemeReturns + fake.recordInvocation("GetScheme", []interface{}{}) + fake.getSchemeMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *DeploymentManager) GetSchemeCallCount() int { + fake.getSchemeMutex.RLock() + defer fake.getSchemeMutex.RUnlock() + return len(fake.getSchemeArgsForCall) +} + +func (fake *DeploymentManager) GetSchemeCalls(stub func() *runtime.Scheme) { + fake.getSchemeMutex.Lock() + defer fake.getSchemeMutex.Unlock() + fake.GetSchemeStub = stub +} + +func (fake *DeploymentManager) GetSchemeReturns(result1 *runtime.Scheme) { + fake.getSchemeMutex.Lock() + defer fake.getSchemeMutex.Unlock() + fake.GetSchemeStub = nil + fake.getSchemeReturns = struct { + result1 *runtime.Scheme + }{result1} +} + +func (fake *DeploymentManager) GetSchemeReturnsOnCall(i int, result1 *runtime.Scheme) { + fake.getSchemeMutex.Lock() + defer fake.getSchemeMutex.Unlock() + fake.GetSchemeStub = nil + if fake.getSchemeReturnsOnCall == nil { + fake.getSchemeReturnsOnCall = make(map[int]struct { + result1 *runtime.Scheme + }) + } + fake.getSchemeReturnsOnCall[i] = struct { + result1 *runtime.Scheme + }{result1} +} + +func (fake *DeploymentManager) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + fake.deploymentStatusMutex.RLock() + defer fake.deploymentStatusMutex.RUnlock() + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + fake.getSchemeMutex.RLock() + defer fake.getSchemeMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *DeploymentManager) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ v2.DeploymentManager = new(DeploymentManager) diff --git a/pkg/migrator/peer/fabric/v2/peer.go b/pkg/migrator/peer/fabric/v2/peer.go new file mode 100644 index 00000000..67be710e --- /dev/null +++ b/pkg/migrator/peer/fabric/v2/peer.go @@ -0,0 +1,286 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package v2 + +import ( + "context" + "fmt" + "reflect" + "strings" + + "github.com/pkg/errors" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + "github.com/IBM-Blockchain/fabric-operator/pkg/action" + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/deployer" + v2peer "github.com/IBM-Blockchain/fabric-operator/pkg/apis/peer/v2" + initializer "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer" + v2config "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer/config/v2" + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + ver "github.com/IBM-Blockchain/fabric-operator/version" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/yaml" +) + +var log = logf.Log.WithName("peer_fabric_migrator") + +//go:generate counterfeiter -o mocks/configmapmanager.go -fake-name ConfigMapManager . ConfigMapManager +type ConfigMapManager interface { + GetCoreConfig(*current.IBPPeer) (*corev1.ConfigMap, error) + CreateOrUpdate(*current.IBPPeer, initializer.CoreConfig) error +} + +//go:generate counterfeiter -o mocks/deploymentmanager.go -fake-name DeploymentManager . DeploymentManager +type DeploymentManager interface { + Get(metav1.Object) (client.Object, error) + Delete(metav1.Object) error + DeploymentStatus(metav1.Object) (appsv1.DeploymentStatus, error) + GetScheme() *runtime.Scheme +} + +type Migrate struct { + DeploymentManager DeploymentManager + ConfigMapManager ConfigMapManager + Client k8sclient.Client +} + +func (m *Migrate) MigrationNeeded(instance metav1.Object) bool { + // Check for DinD container, if DinD container not found this is + // v2 fabric IBP instance + obj, err := m.DeploymentManager.Get(instance) + if err != nil { + // If deployment does not exist, this instance is not a healthy + // state and migration should be avoided + return false + } + + var deploymentUpdated bool + var configUpdated bool + + dep := obj.(*appsv1.Deployment) + for _, cont := range dep.Spec.Template.Spec.Containers { + if strings.ToLower(cont.Name) == "dind" { + // DinD container found, instance is not at v2 + deploymentUpdated = false + } + } + + cm, err := m.ConfigMapManager.GetCoreConfig(instance.(*current.IBPPeer)) + if err != nil { + // If config map does not exist, this instance is not a healthy + // state and migration should be avoided + return false + } + + v1corebytes := cm.BinaryData["core.yaml"] + + core := &v2config.Core{} + err = yaml.Unmarshal(v1corebytes, core) + if err != nil { + return false + } + + configUpdated = configHasBeenUpdated(core) + + return !deploymentUpdated || !configUpdated +} + +func (m *Migrate) UpgradeDBs(instance metav1.Object, timeouts config.DBMigrationTimeouts) error { + log.Info(fmt.Sprintf("Resetting Peer '%s'", instance.GetName())) + return action.UpgradeDBs(m.DeploymentManager, m.Client, instance.(*current.IBPPeer), timeouts) +} + +func (m *Migrate) UpdateConfig(instance metav1.Object, version string) error { + log.Info("Updating config to v2") + cm, err := m.ConfigMapManager.GetCoreConfig(instance.(*current.IBPPeer)) + if err != nil { + return errors.Wrap(err, "failed to get config 
map") + } + v1corebytes := cm.BinaryData["core.yaml"] + + core := &v2config.Core{} + err = yaml.Unmarshal(v1corebytes, core) + if err != nil { + return err + } + + // resetting VM endpoint + // As per this PR #2165, VM and Ledger structs been added to Peer. endpoint is not required for v2 peer as there is no DinD + core.VM.Endpoint = "" + + core.Chaincode.ExternalBuilders = []v2peer.ExternalBuilder{ + v2peer.ExternalBuilder{ + Name: "ibp-builder", + Path: "/usr/local", + EnvironmentWhiteList: []string{ + "IBP_BUILDER_ENDPOINT", + "IBP_BUILDER_SHARED_DIR", + }, + PropogateEnvironment: []string{ + "IBP_BUILDER_ENDPOINT", + "IBP_BUILDER_SHARED_DIR", + "PEER_NAME", + }, + }, + } + + core.Chaincode.InstallTimeout = common.MustParseDuration("300s") + if core.Chaincode.System == nil { + core.Chaincode.System = make(map[string]string) + } + core.Chaincode.System["_lifecycle"] = "enable" + + core.Peer.Limits.Concurrency.DeliverService = 2500 + core.Peer.Limits.Concurrency.EndorserService = 2500 + + core.Peer.Gossip.PvtData.ImplicitCollectionDisseminationPolicy.RequiredPeerCount = 0 + core.Peer.Gossip.PvtData.ImplicitCollectionDisseminationPolicy.MaxPeerCount = 1 + + currentVer := ver.String(version) + if currentVer.EqualWithoutTag(ver.V2_4_1) || currentVer.GreaterThan(ver.V2_4_1) { + trueVal := true + core.Peer.Gateway = v2peer.Gateway{ + Enabled: &trueVal, + EndorsementTimeout: common.MustParseDuration("30s"), + DialTimeout: common.MustParseDuration("120s"), + } + core.Peer.Limits.Concurrency.GatewayService = 500 + core.Ledger.State.SnapShots = v2peer.SnapShots{ + RootDir: "/data/peer/ledgersData/snapshots/", + } + } + + core.Ledger.State.CouchdbConfig.CacheSize = 64 + core.Ledger.State.CouchdbConfig.MaxRetries = 10 + + err = m.ConfigMapManager.CreateOrUpdate(instance.(*current.IBPPeer), core) + if err != nil { + return err + } + + return nil +} + +// SetChaincodeLauncherResourceOnCR will update the peer's CR by adding chaincode launcher +// resources. The default resources are defined in deployer's config map, which is part +// IBPConsole resource. The default resources are extracted for the chaincode launcher +// by reading the deployer's config map and updating the CR. 
+func (m *Migrate) SetChaincodeLauncherResourceOnCR(instance metav1.Object) error { + log.Info("Setting chaincode launcher resource on CR") + cr := instance.(*current.IBPPeer) + + if cr.Spec.Resources != nil && cr.Spec.Resources.CCLauncher != nil { + // No need to proceed further if Chaincode launcher resources already set + return nil + } + + consoleList := ¤t.IBPConsoleList{} + if err := m.Client.List(context.TODO(), consoleList); err != nil { + return err + } + consoles := consoleList.Items + + // If no consoles found, set default resource for chaincode launcher container + rr := &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("0.1"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + }, + } + + if len(consoles) > 0 { + log.Info("Setting chaincode launcher resource on CR based on deployer config from config map") + // Get config map associated with console + cm := &corev1.ConfigMap{} + nn := types.NamespacedName{ + Name: fmt.Sprintf("%s-deployer", consoles[0].GetName()), + Namespace: instance.GetNamespace(), + } + if err := m.Client.Get(context.TODO(), nn, cm); err != nil { + return err + } + + settingsBytes := []byte(cm.Data["settings.yaml"]) + settings := &deployer.Config{} + if err := yaml.Unmarshal(settingsBytes, settings); err != nil { + return err + } + + if settings.Defaults != nil && settings.Defaults.Resources != nil && + settings.Defaults.Resources.Peer != nil && settings.Defaults.Resources.Peer.CCLauncher != nil { + + rr = settings.Defaults.Resources.Peer.CCLauncher + } + } + + log.Info(fmt.Sprintf("Setting chaincode launcher resource on CR to %+v", rr)) + if cr.Spec.Resources == nil { + cr.Spec.Resources = ¤t.PeerResources{} + } + cr.Spec.Resources.CCLauncher = rr + if err := m.Client.Update(context.TODO(), cr); err != nil { + return err + } + + return nil +} + +// Updates required from v1.4 to v2.x: +// - External builders +// - Limits +// - Install timeout +// - Implicit collection dissemination policy +func configHasBeenUpdated(core *v2config.Core) bool { + if len(core.Chaincode.ExternalBuilders) == 0 { + return false + } + if core.Chaincode.ExternalBuilders[0].Name != "ibp-builder" { + return false + } + + // Check if install timeout was set + if reflect.DeepEqual(core.Chaincode.InstallTimeout, common.Duration{}) { + return false + } + + if core.Peer.Limits.Concurrency.DeliverService != 2500 { + return false + } + + if core.Peer.Limits.Concurrency.EndorserService != 2500 { + return false + } + + return true +} diff --git a/pkg/migrator/peer/fabric/v2/peer_test.go b/pkg/migrator/peer/fabric/v2/peer_test.go new file mode 100644 index 00000000..1366bc69 --- /dev/null +++ b/pkg/migrator/peer/fabric/v2/peer_test.go @@ -0,0 +1,367 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v2_test + +import ( + "context" + "strings" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/pkg/errors" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + controllermocks "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/deployer" + v2peer "github.com/IBM-Blockchain/fabric-operator/pkg/apis/peer/v2" + v2config "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer/config/v2" + v2 "github.com/IBM-Blockchain/fabric-operator/pkg/migrator/peer/fabric/v2" + "github.com/IBM-Blockchain/fabric-operator/pkg/migrator/peer/fabric/v2/mocks" + + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + + k8sclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" +) + +var _ = Describe("V2 peer migrator", func() { + var ( + deploymentManager *mocks.DeploymentManager + configMapManager *mocks.ConfigMapManager + client *controllermocks.Client + migrator *v2.Migrate + instance *current.IBPPeer + ) + const FABRIC_V2 = "2.2.5-1" + BeforeEach(func() { + deploymentManager = &mocks.DeploymentManager{} + configMapManager = &mocks.ConfigMapManager{} + client = &controllermocks.Client{} + + instance = ¤t.IBPPeer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ibppeer", + }, + Spec: current.IBPPeerSpec{ + Images: ¤t.PeerImages{ + PeerImage: "peerimage", + PeerTag: "peertag", + }, + Resources: ¤t.PeerResources{}, + }, + } + + replicas := int32(1) + dep := &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Replicas: &replicas, + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + v1.Container{ + Name: "peer", + }, + v1.Container{ + Name: "dind", + }, + }, + }, + }, + }, + } + deploymentManager.GetReturns(dep, nil) + deploymentManager.DeploymentStatusReturns(appsv1.DeploymentStatus{}, nil) + deploymentManager.GetSchemeReturns(&runtime.Scheme{}) + + client.GetStub = func(ctx context.Context, types types.NamespacedName, obj k8sclient.Object) error { + switch obj.(type) { + case *batchv1.Job: + job := obj.(*batchv1.Job) + job.Status.Active = int32(1) + } + return nil + } + + configMapManager.GetCoreConfigReturns(&corev1.ConfigMap{ + BinaryData: map[string][]byte{ + "core.yaml": []byte{}, + }, + }, nil) + + migrator = &v2.Migrate{ + DeploymentManager: deploymentManager, + ConfigMapManager: configMapManager, + Client: client, + } + }) + + Context("migration needed", func() { + It("returns false if deployment not found", func() { + deploymentManager.GetReturns(nil, errors.New("not found")) + needed := migrator.MigrationNeeded(instance) + Expect(needed).To(Equal(false)) + }) + + It("returns true if config map not updated", func() { + dep := &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + v1.Container{ + Name: "peer", + }, + }, + }, + }, + }, + } + deploymentManager.GetReturns(dep, nil) + + needed := migrator.MigrationNeeded(instance) + Expect(needed).To(Equal(true)) + }) + + It("returns true 
if deployment has dind container", func() { + needed := migrator.MigrationNeeded(instance) + Expect(needed).To(Equal(true)) + }) + }) + + Context("upgrade dbs peer", func() { + BeforeEach(func() { + client.ListStub = func(ctx context.Context, obj k8sclient.ObjectList, opts ...k8sclient.ListOption) error { + if strings.Contains(opts[0].(*k8sclient.ListOptions).LabelSelector.String(), "app") { + pods := obj.(*corev1.PodList) + pods.Items = []corev1.Pod{} + } + if strings.Contains(opts[0].(*k8sclient.ListOptions).LabelSelector.String(), "job-name") { + pods := obj.(*corev1.PodList) + pods.Items = []corev1.Pod{ + corev1.Pod{ + Status: corev1.PodStatus{ + ContainerStatuses: []corev1.ContainerStatus{ + corev1.ContainerStatus{ + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{}, + }, + }, + }, + }, + }, + } + } + return nil + } + }) + + It("returns an error if unable to reset peer", func() { + deploymentManager.GetReturns(nil, errors.New("restore failed")) + err := migrator.UpgradeDBs(instance, config.DBMigrationTimeouts{ + JobStart: common.MustParseDuration("1s"), + JobCompletion: common.MustParseDuration("1s"), + }) + Expect(err).To(HaveOccurred()) + Expect(err).Should(MatchError(ContainSubstring("restore failed"))) + }) + + It("upgrade dbs", func() { + status := appsv1.DeploymentStatus{ + Replicas: int32(0), + } + deploymentManager.DeploymentStatusReturnsOnCall(0, status, nil) + + status.Replicas = 1 + deploymentManager.DeploymentStatusReturnsOnCall(1, status, nil) + + err := migrator.UpgradeDBs(instance, config.DBMigrationTimeouts{ + JobStart: common.MustParseDuration("1s"), + JobCompletion: common.MustParseDuration("1s"), + }) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("update config", func() { + It("returns an error if unable to get config map", func() { + configMapManager.GetCoreConfigReturns(nil, errors.New("get config map failed")) + err := migrator.UpdateConfig(instance, FABRIC_V2) + Expect(err).To(HaveOccurred()) + Expect(err).Should(MatchError(ContainSubstring("get config map failed"))) + }) + + It("returns an error if unable to update config map", func() { + configMapManager.CreateOrUpdateReturns(errors.New("update config map failed")) + err := migrator.UpdateConfig(instance, FABRIC_V2) + Expect(err).To(HaveOccurred()) + Expect(err).Should(MatchError(ContainSubstring("update config map failed"))) + }) + + It("sets relevant v2.x fields in config", func() { + err := migrator.UpdateConfig(instance, FABRIC_V2) + Expect(err).NotTo(HaveOccurred()) + + _, config := configMapManager.CreateOrUpdateArgsForCall(0) + core := config.(*v2config.Core) + + By("setting external builder", func() { + Expect(core.Chaincode.ExternalBuilders).To(ContainElement( + v2peer.ExternalBuilder{ + Name: "ibp-builder", + Path: "/usr/local", + EnvironmentWhiteList: []string{ + "IBP_BUILDER_ENDPOINT", + "IBP_BUILDER_SHARED_DIR", + }, + PropogateEnvironment: []string{ + "IBP_BUILDER_ENDPOINT", + "IBP_BUILDER_SHARED_DIR", + "PEER_NAME", + }, + }, + )) + }) + + By("setting install timeout", func() { + Expect(core.Chaincode.InstallTimeout).To(Equal(common.MustParseDuration("300s"))) + }) + + By("setting lifecycle chaincode", func() { + Expect(core.Chaincode.System["_lifecycle"]).To(Equal("enable")) + }) + + By("setting limits", func() { + Expect(core.Peer.Limits).To(Equal(v2peer.Limits{ + Concurrency: v2peer.Concurrency{ + DeliverService: 2500, + EndorserService: 2500, + }, + })) + }) + + By("setting implicit collection dissemination policy", func() { + 
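+				// These assertions mirror the implicit private-data collection defaults that
+				// UpdateConfig writes: RequiredPeerCount 0 and MaxPeerCount 1.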
Expect(core.Peer.Gossip.PvtData.ImplicitCollectionDisseminationPolicy).To(Equal(v2peer.ImplicitCollectionDisseminationPolicy{ + RequiredPeerCount: 0, + MaxPeerCount: 1, + })) + }) + + }) + + It("updates config map", func() { + err := migrator.UpdateConfig(instance, FABRIC_V2) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("set chaincode launcher resource on CR", func() { + BeforeEach(func() { + client.GetStub = func(ctx context.Context, nn types.NamespacedName, obj k8sclient.Object) error { + switch obj.(type) { + case *corev1.ConfigMap: + dep := &deployer.Config{ + Defaults: &deployer.Defaults{ + Resources: &deployer.Resources{ + Peer: ¤t.PeerResources{ + CCLauncher: &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("3"), + corev1.ResourceMemory: resource.MustParse("3Gi"), + }, + }, + }, + }, + }, + } + + bytes, err := yaml.Marshal(dep) + Expect(err).NotTo(HaveOccurred()) + + cm := obj.(*corev1.ConfigMap) + cm.Data = map[string]string{ + "settings.yaml": string(bytes), + } + } + + return nil + } + + client.ListStub = func(ctx context.Context, obj k8sclient.ObjectList, opts ...k8sclient.ListOption) error { + switch obj.(type) { + case *current.IBPConsoleList: + list := obj.(*current.IBPConsoleList) + list.Items = []current.IBPConsole{current.IBPConsole{}} + } + + return nil + } + }) + + It("sets resources based on deployer config map", func() { + err := migrator.SetChaincodeLauncherResourceOnCR(instance) + Expect(err).NotTo(HaveOccurred()) + + _, cr, _ := client.UpdateArgsForCall(0) + Expect(cr).NotTo(BeNil()) + Expect(*cr.(*current.IBPPeer).Spec.Resources.CCLauncher).To(Equal(corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("3"), + corev1.ResourceMemory: resource.MustParse("3Gi"), + }}, + )) + }) + + It("sets resources default config map", func() { + client.GetStub = nil + + err := migrator.SetChaincodeLauncherResourceOnCR(instance) + Expect(err).NotTo(HaveOccurred()) + + _, cr, _ := client.UpdateArgsForCall(0) + Expect(cr).NotTo(BeNil()) + Expect(*cr.(*current.IBPPeer).Spec.Resources.CCLauncher).To(Equal(corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("0.1"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + }}, + )) + }) + }) +}) diff --git a/pkg/migrator/peer/fabric/v2/v2_suite_test.go b/pkg/migrator/peer/fabric/v2/v2_suite_test.go new file mode 100644 index 00000000..bbd7a82d --- /dev/null +++ b/pkg/migrator/peer/fabric/v2/v2_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v2_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestV2(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "V2 Suite") +} diff --git a/pkg/migrator/peer/peer_suite_test.go b/pkg/migrator/peer/peer_suite_test.go new file mode 100644 index 00000000..e4cc62d5 --- /dev/null +++ b/pkg/migrator/peer/peer_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package peer_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestPeer(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Peer Suite") +} diff --git a/pkg/offering/base/ca/ca.go b/pkg/offering/base/ca/ca.go new file mode 100644 index 00000000..593a6277 --- /dev/null +++ b/pkg/offering/base/ca/ca.go @@ -0,0 +1,1042 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package baseca + +import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + crand "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/base64" + "encoding/json" + "encoding/pem" + "fmt" + "math/big" + "net" + "os" + "strings" + "time" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + cav1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/ca/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/certificate" + initializer "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca" + commonconfig "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + controllerclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + resourcemanager "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/manager" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common/reconcilechecks" + "github.com/IBM-Blockchain/fabric-operator/pkg/operatorerrors" + "github.com/IBM-Blockchain/fabric-operator/pkg/restart" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + "github.com/IBM-Blockchain/fabric-operator/pkg/util/pointer" + "github.com/IBM-Blockchain/fabric-operator/version" + "github.com/pkg/errors" + k8sclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var log = logf.Log.WithName("base_ca") + +const ( + DaysToSecondsConversion = int64(24 * 60 * 60) +) + +type Override interface { + Deployment(v1.Object, *appsv1.Deployment, resources.Action) error + Service(v1.Object, *corev1.Service, resources.Action) error + PVC(v1.Object, *corev1.PersistentVolumeClaim, resources.Action) error + Role(v1.Object, *rbacv1.Role, resources.Action) error + RoleBinding(v1.Object, *rbacv1.RoleBinding, resources.Action) error + ServiceAccount(v1.Object, *corev1.ServiceAccount, resources.Action) error + IsPostgres(instance *current.IBPCA) bool +} + +//go:generate counterfeiter -o mocks/update.go -fake-name Update . Update + +type Update interface { + SpecUpdated() bool + CAOverridesUpdated() bool + TLSCAOverridesUpdated() bool + ConfigOverridesUpdated() bool + RestartNeeded() bool + CACryptoUpdated() bool + CACryptoCreated() bool + RenewTLSCert() bool + FabricVersionUpdated() bool + ImagesUpdated() bool + CATagUpdated() bool +} + +//go:generate counterfeiter -o mocks/restart_manager.go -fake-name RestartManager . RestartManager + +type RestartManager interface { + ForConfigOverride(instance v1.Object) error + TriggerIfNeeded(instance restart.Instance) error + ForTLSReenroll(instance v1.Object) error + ForRestartAction(instance v1.Object) error +} + +type IBPCA interface { + Initialize(instance *current.IBPCA, update Update) error + PreReconcileChecks(instance *current.IBPCA, update Update) (bool, error) + ReconcileManagers(instance *current.IBPCA, update Update) error + Reconcile(instance *current.IBPCA, update Update) (common.Result, error) +} + +//go:generate counterfeiter -o mocks/initialize.go -fake-name InitializeIBPCA . 
InitializeIBPCA + +type InitializeIBPCA interface { + HandleEnrollmentCAInit(instance *current.IBPCA, update Update) (*initializer.Response, error) + HandleConfigResources(name string, instance *current.IBPCA, resp *initializer.Response, update Update) error + HandleTLSCAInit(instance *current.IBPCA, update Update) (*initializer.Response, error) + SyncDBConfig(*current.IBPCA) (*current.IBPCA, error) + CreateOrUpdateConfigMap(instance *current.IBPCA, data map[string][]byte, name string) error + ReadConfigMap(instance *current.IBPCA, name string) (*corev1.ConfigMap, error) +} + +//go:generate counterfeiter -o mocks/certificate_manager.go -fake-name CertificateManager . CertificateManager + +type CertificateManager interface { + GetDurationToNextRenewalForCert(string, []byte, v1.Object, int64) (time.Duration, error) + GetSecret(string, string) (*corev1.Secret, error) + Expires([]byte, int64) (bool, time.Time, error) + UpdateSecret(v1.Object, string, map[string][]byte) error +} + +var _ IBPCA = &CA{} + +type CA struct { + Client controllerclient.Client + Scheme *runtime.Scheme + Config *config.Config + + DeploymentManager resources.Manager + ServiceManager resources.Manager + PVCManager resources.Manager + RoleManager resources.Manager + RoleBindingManager resources.Manager + ServiceAccountManager resources.Manager + + Override Override + Initializer InitializeIBPCA + + CertificateManager CertificateManager + RenewCertTimers map[string]*time.Timer + + Restart RestartManager +} + +func New(client controllerclient.Client, scheme *runtime.Scheme, config *config.Config, o Override) *CA { + ca := &CA{ + Client: client, + Scheme: scheme, + Config: config, + Override: o, + } + ca.CreateManagers() + ca.Initializer = NewInitializer(config.CAInitConfig, scheme, client, ca.GetLabels, config.Operator.CA.Timeouts.HSMInitJob) + ca.Restart = restart.New(client, config.Operator.Restart.WaitTime.Get(), config.Operator.Restart.Timeout.Get()) + ca.CertificateManager = &certificate.CertificateManager{ + Client: client, + Scheme: scheme, + } + ca.RenewCertTimers = make(map[string]*time.Timer) + + return ca +} + +func (ca *CA) CreateManagers() { + override := ca.Override + resourceManager := resourcemanager.New(ca.Client, ca.Scheme) + ca.DeploymentManager = resourceManager.CreateDeploymentManager("", override.Deployment, ca.GetLabels, ca.Config.CAInitConfig.DeploymentFile) + ca.ServiceManager = resourceManager.CreateServiceManager("", override.Service, ca.GetLabels, ca.Config.CAInitConfig.ServiceFile) + ca.PVCManager = resourceManager.CreatePVCManager("", override.PVC, ca.GetLabels, ca.Config.CAInitConfig.PVCFile) + ca.RoleManager = resourceManager.CreateRoleManager("", override.Role, ca.GetLabels, ca.Config.CAInitConfig.RoleFile) + ca.RoleBindingManager = resourceManager.CreateRoleBindingManager("", override.RoleBinding, ca.GetLabels, ca.Config.CAInitConfig.RoleBindingFile) + ca.ServiceAccountManager = resourceManager.CreateServiceAccountManager("", override.ServiceAccount, ca.GetLabels, ca.Config.CAInitConfig.ServiceAccountFile) +} + +func (ca *CA) Reconcile(instance *current.IBPCA, update Update) (common.Result, error) { + var err error + + versionSet, err := ca.SetVersion(instance) + if err != nil { + return common.Result{}, errors.Wrap(err, fmt.Sprintf("failed updating CR '%s' to version '%s'", instance.Name, version.Operator)) + } + if versionSet { + log.Info("Instance version updated, requeuing request...") + return common.Result{ + Result: reconcile.Result{ + Requeue: true, + }, + }, nil + } + + 
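+	// Validate and normalize the CR before touching any Kubernetes resources: version/image
+	// resolution, name-length and HSM endpoint validation, zone/region selection, and a
+	// default replica count. If PreReconcileChecks mutates the spec, the instance is patched
+	// and the request requeued so the rest of the reconcile sees the updated CR.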
+	instanceUpdated, err := ca.PreReconcileChecks(instance, update)
+	if err != nil {
+		return common.Result{}, errors.Wrap(err, "failed pre reconcile checks")
+	}
+
+	if instanceUpdated {
+		log.Info("Updating instance after pre reconcile checks")
+		err := ca.Client.Patch(context.TODO(), instance, nil, controllerclient.PatchOption{
+			Resilient: &controllerclient.ResilientPatch{
+				Retry:    3,
+				Into:     &current.IBPCA{},
+				Strategy: k8sclient.MergeFrom,
+			},
+		})
+		if err != nil {
+			return common.Result{}, errors.Wrap(err, "failed to update instance")
+		}
+
+		log.Info("Instance updated, requeuing request...")
+		return common.Result{
+			Result: reconcile.Result{
+				Requeue: true,
+			},
+		}, nil
+	}
+
+	err = ca.AddTLSCryptoIfMissing(instance, ca.GetEndpointsDNS(instance))
+	if err != nil {
+		return common.Result{}, errors.Wrap(err, "failed to generate tls crypto")
+	}
+
+	err = ca.Initialize(instance, update)
+	if err != nil {
+		return common.Result{}, operatorerrors.Wrap(err, operatorerrors.CAInitilizationFailed, "failed to initialize ca")
+	}
+
+	err = ca.ReconcileManagers(instance, update)
+	if err != nil {
+		return common.Result{}, errors.Wrap(err, "failed to reconcile managers")
+	}
+
+	err = ca.UpdateConnectionProfile(instance)
+	if err != nil {
+		return common.Result{}, errors.Wrap(err, "failed to create connection profile")
+	}
+
+	err = ca.CheckStates(instance)
+	if err != nil {
+		return common.Result{}, errors.Wrap(err, "failed to check and restore state")
+	}
+
+	if update.CACryptoUpdated() {
+		log.Info("TLS crypto updated, triggering restart")
+		err = ca.Restart.ForTLSReenroll(instance)
+		if err != nil {
+			return common.Result{}, errors.Wrap(err, "failed to update restart config")
+		}
+	}
+
+	err = ca.HandleActions(instance, update)
+	if err != nil {
+		return common.Result{}, err
+	}
+
+	err = ca.HandleRestart(instance, update)
+	if err != nil {
+		return common.Result{}, err
+	}
+
+	return common.Result{}, nil
+}
+
+// PreReconcileChecks validates the CR request before starting the reconcile flow
+func (ca *CA) PreReconcileChecks(instance *current.IBPCA, update Update) (bool, error) {
+	var err error
+
+	imagesUpdated, err := reconcilechecks.FabricVersionHelper(instance, ca.Config.Operator.Versions, update)
+	if err != nil {
+		return false, errors.Wrap(err, "failed during version and image checks")
+	}
+
+	var maxNameLength *int
+	if instance.Spec.ConfigOverride != nil {
+		maxNameLength = instance.Spec.ConfigOverride.MaxNameLength
+	}
+	err = util.ValidationChecks(instance.TypeMeta, instance.ObjectMeta, "IBPCA", maxNameLength)
+	if err != nil {
+		return false, err
+	}
+
+	if instance.Spec.HSMSet() {
+		err = util.ValidateHSMProxyURL(instance.Spec.HSM.PKCS11Endpoint)
+		if err != nil {
+			return false, errors.Wrapf(err, "invalid HSM endpoint for ca instance '%s'", instance.GetName())
+		}
+	}
+
+	if !instance.Spec.DomainSet() {
+		return false, fmt.Errorf("domain not set for ca instance '%s'", instance.GetName())
+	}
+
+	zoneUpdated, err := ca.SelectZone(instance)
+	if err != nil {
+		return false, err
+	}
+
+	regionUpdated, err := ca.SelectRegion(instance)
+	if err != nil {
+		return false, err
+	}
+
+	hsmImageUpdated := ca.ReconcileHSMImages(instance)
+
+	var replicasUpdated bool
+	if instance.Spec.Replicas == nil {
+		replicas := int32(1)
+		instance.Spec.Replicas = &replicas
+		replicasUpdated = true
+	}
+
+	updated := zoneUpdated || regionUpdated || hsmImageUpdated || replicasUpdated || imagesUpdated
+
+	if updated {
+		log.Info(fmt.Sprintf("zoneUpdated %t, regionUpdated %t, hsmImageUpdated %t, replicasUpdated %t, imagesUpdated %t",
+			zoneUpdated, regionUpdated, hsmImageUpdated, replicasUpdated, imagesUpdated))
+	}
+
+	return updated, nil
+}
+
+func (ca *CA) SetVersion(instance *current.IBPCA) (bool, error) {
+	if instance.Status.Version == "" || !version.String(instance.Status.Version).Equal(version.Operator) {
+		log.Info("Version of Operator: ", "version", version.Operator)
+		log.Info("Version of CR: ", "version", instance.Status.Version)
+		log.Info(fmt.Sprintf("Setting '%s' to version '%s'", instance.Name, version.Operator))
+
+		instance.Status.Version = version.Operator
+		err := ca.Client.PatchStatus(context.TODO(), instance, nil, controllerclient.PatchOption{
+			Resilient: &controllerclient.ResilientPatch{
+				Retry:    3,
+				Into:     &current.IBPCA{},
+				Strategy: k8sclient.MergeFrom,
+			},
+		})
+		if err != nil {
+			return false, err
+		}
+		return true, nil
+	}
+	return false, nil
+}
+
+func (ca *CA) Initialize(instance *current.IBPCA, update Update) error {
+	var err error
+
+	// TODO: Add checks to determine if initialization is needed. Split this method into
+	// two: one should handle initialization during the create event of a CR and the other
+	// should handle update events
+
+	// Service account is required by job
+	err = ca.ReconcileRBAC(instance)
+	if err != nil {
+		return err
+	}
+
+	if instance.IsHSMEnabled() {
+		// If HSM config not found, HSM proxy is being used
+		if instance.UsingHSMProxy() {
+			err = os.Setenv("PKCS11_PROXY_SOCKET", instance.Spec.HSM.PKCS11Endpoint)
+			if err != nil {
+				return err
+			}
+		} else {
+			hsmConfig, err := commonconfig.ReadHSMConfig(ca.Client, instance)
+			if err != nil {
+				return errors.New("using non-proxy HSM, but no HSM config defined as config map 'ibp-hsm-config'")
+			}
+
+			if hsmConfig.Daemon != nil {
+				log.Info("Using daemon based HSM, creating pvc...")
+				ca.PVCManager.SetCustomName(instance.Spec.CustomNames.PVC.CA)
+				err = ca.PVCManager.Reconcile(instance, update.SpecUpdated())
+				if err != nil {
+					return errors.Wrap(err, "failed PVC reconciliation")
+				}
+			}
+		}
+	}
+
+	instance, err = ca.Initializer.SyncDBConfig(instance)
+	if err != nil {
+		return err
+	}
+
+	eresp, err := ca.Initializer.HandleEnrollmentCAInit(instance, update)
+	if err != nil {
+		return err
+	}
+
+	if eresp != nil {
+		err = ca.Initializer.HandleConfigResources(fmt.Sprintf("%s-ca", instance.GetName()), instance, eresp, update)
+		if err != nil {
+			return err
+		}
+	}
+
+	tresp, err := ca.Initializer.HandleTLSCAInit(instance, update)
+	if err != nil {
+		return err
+	}
+
+	if tresp != nil {
+		err = ca.Initializer.HandleConfigResources(fmt.Sprintf("%s-tlsca", instance.GetName()), instance, tresp, update)
+		if err != nil {
+			return err
+		}
+	}
+
+	// If the deployment exists and a config override update was detected, restart the pod(s)
+	// to pick up the latest configuration from the config map and secret
+	if ca.DeploymentManager.Exists(instance) && update.ConfigOverridesUpdated() {
+		// Request deployment restart for config override update
+		if err := ca.Restart.ForConfigOverride(instance); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (ca *CA) SelectZone(instance *current.IBPCA) (bool, error) {
+	if instance.Spec.Zone == "select" {
+		zone := util.GetZone(ca.Client)
+		instance.Spec.Zone = zone
+		return true, nil
+	}
+	if instance.Spec.Zone != "" {
+		err := util.ValidateZone(ca.Client, instance.Spec.Zone)
+		if err != nil {
+			return false, err
+		}
+	}
+	return false, nil
+}
+
+func (ca *CA) SelectRegion(instance *current.IBPCA) (bool, error) {
+	if instance.Spec.Region == "select" {
+		region := util.GetRegion(ca.Client)
+		instance.Spec.Region = region
+		return true, nil
+	}
+	if instance.Spec.Region != "" {
+		err := util.ValidateRegion(ca.Client, instance.Spec.Region)
+		if err != nil {
+			return false, err
+		}
+	}
+	return false, nil
+}
+
+func (ca *CA) ReconcileManagers(instance *current.IBPCA, updated Update) error {
+	var err error
+
+	update := updated.SpecUpdated()
+
+	if !ca.Override.IsPostgres(instance) {
+		log.Info("Using sqlite database, creating pvc...")
+		ca.PVCManager.SetCustomName(instance.Spec.CustomNames.PVC.CA)
+		err = ca.PVCManager.Reconcile(instance, update)
+		if err != nil {
+			return errors.Wrap(err, "failed PVC reconciliation")
+		}
+	}
+
+	err = ca.ServiceManager.Reconcile(instance, update)
+	if err != nil {
+		return errors.Wrap(err, "failed Service reconciliation")
+	}
+
+	err = ca.ReconcileRBAC(instance)
+	if err != nil {
+		return errors.Wrap(err, "failed RBAC reconciliation")
+	}
+
+	err = ca.DeploymentManager.Reconcile(instance, update)
+	if err != nil {
+		return errors.Wrap(err, "failed Deployment reconciliation")
+	}
+
+	// TODO: Can this be removed?
+	err = ca.createSecret(instance, "-ca")
+	if err != nil {
+		return errors.Wrap(err, "failed CA Secret reconciliation")
+	}
+
+	// TODO: Can this be removed?
+	err = ca.createSecret(instance, "-tlsca")
+	if err != nil {
+		return errors.Wrap(err, "failed TLS Secret reconciliation")
+	}
+
+	return nil
+}
+
+func (ca *CA) ReconcileRBAC(instance *current.IBPCA) error {
+	var err error
+
+	err = ca.RoleManager.Reconcile(instance, false)
+	if err != nil {
+		return err
+	}
+
+	err = ca.RoleBindingManager.Reconcile(instance, false)
+	if err != nil {
+		return err
+	}
+
+	err = ca.ServiceAccountManager.Reconcile(instance, false)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (ca *CA) UpdateConnectionProfile(instance *current.IBPCA) error {
+	var err error
+
+	endpoints := ca.GetEndpoints(instance)
+
+	cacrypto, err := common.GetCACryptoEncoded(ca.Client, instance)
+	if err != nil {
+		return err
+	}
+	tlscacrypto, err := common.GetTLSCACryptoEncoded(ca.Client, instance)
+	if err != nil {
+		return err
+	}
+
+	err = ca.UpdateConnectionProfileConfigmap(instance, *endpoints, cacrypto.TLSCert, cacrypto.Cert, tlscacrypto.Cert)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (ca *CA) UpdateConnectionProfileConfigmap(instance *current.IBPCA, endpoints current.CAEndpoints, tlscert, cacert, tlscacert string) error {
+	var err error
+
+	name := instance.Name + "-connection-profile"
+	nn := types.NamespacedName{
+		Name:      name,
+		Namespace: instance.GetNamespace(),
+	}
+
+	log.Info(fmt.Sprintf("Create connection profile configmap called for %s", instance.Name))
+	connectionProfile := &current.CAConnectionProfile{
+		Endpoints: endpoints,
+		TLS: &current.ConnectionProfileTLS{
+			Cert: tlscert,
+		},
+		CA: &current.MSP{
+			SignCerts: cacert,
+		},
+		TLSCA: &current.MSP{
+			SignCerts: tlscacert,
+		},
+	}
+
+	bytes, err := json.Marshal(connectionProfile)
+	if err != nil {
+		return errors.Wrap(err, "failed to marshal connectionprofile")
+	}
+
+	cm := &corev1.ConfigMap{
+		ObjectMeta: v1.ObjectMeta{
+			Name:      name,
+			Namespace: instance.GetNamespace(),
+			Labels:    ca.GetLabels(instance),
+		},
+		BinaryData: map[string][]byte{"profile.json": bytes},
+	}
+
+	err = ca.Client.Get(context.TODO(), nn, &corev1.ConfigMap{})
+	if err == nil {
+		err = ca.Client.Update(context.TODO(), cm, controllerclient.UpdateOption{Owner: instance, Scheme: ca.Scheme})
+		if err != nil {
+			return errors.Wrap(err, "failed to update connection profile configmap")
+		}
+	} else {
+		err = ca.Client.Create(context.TODO(), cm, controllerclient.CreateOption{Owner: instance, Scheme: ca.Scheme})
+		if err != nil {
+			return errors.Wrap(err, "failed to create connection profile configmap")
+		}
+	}
+
+	return nil
+}
+
+func (ca *CA) GetEndpoints(instance *current.IBPCA) *current.CAEndpoints {
+	endpoints := &current.CAEndpoints{
+		API:        "https://" + instance.Namespace + "-" + instance.Name + "-ca." + instance.Spec.Domain + ":443",
+		Operations: "https://" + instance.Namespace + "-" + instance.Name + "-operations." + instance.Spec.Domain + ":443",
+	}
+	return endpoints
+}
+
+func (ca *CA) CheckStates(instance *current.IBPCA) error {
+	// Check state if deployment exists, make sure that deployment matches what is expected
+	// based on IBPCA spec
+	if ca.DeploymentManager.Exists(instance) {
+		err := ca.DeploymentManager.CheckState(instance)
+		if err != nil {
+			log.Error(err, "unexpected state")
+			err = ca.DeploymentManager.RestoreState(instance)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (ca *CA) GetLabels(instance v1.Object) map[string]string {
+	return instance.GetLabels()
+}
+
+// TODO: Can this be removed?
+func (ca *CA) createSecret(instance *current.IBPCA, suffix string) error {
+	secretCA := &corev1.Secret{}
+	secretCA.Name = instance.Name + suffix
+	secretCA.Namespace = instance.Namespace
+	secretCA.Labels = ca.GetLabels(instance)
+
+	secretCA.Data = map[string][]byte{}
+	secretCA.Data["_shared_creation"] = []byte("-----BEGIN")
+
+	err := ca.Client.Create(context.TODO(), secretCA, controllerclient.CreateOption{
+		Owner:  instance,
+		Scheme: ca.Scheme,
+	})
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (ca *CA) CreateCACryptoSecret(instance *current.IBPCA, caCrypto map[string][]byte) error {
+	// Create CA secret with crypto
+	secret := &corev1.Secret{
+		Data: caCrypto,
+		Type: corev1.SecretTypeOpaque,
+	}
+	secret.Name = instance.Name + "-ca-crypto"
+	secret.Namespace = instance.Namespace
+	secret.Labels = ca.GetLabels(instance)
+
+	err := ca.Client.Create(context.TODO(), secret, controllerclient.CreateOption{
+		Owner:  instance,
+		Scheme: ca.Scheme,
+	})
+	if err != nil {
+		return errors.Wrap(err, "failed to create CA crypto secret")
+	}
+
+	return nil
+}
+
+func (ca *CA) CreateTLSCACryptoSecret(instance *current.IBPCA, tlscaCrypto map[string][]byte) error {
+	// Create TLSCA secret with crypto
+	secret := &corev1.Secret{
+		Data: tlscaCrypto,
+		Type: corev1.SecretTypeOpaque,
+	}
+	secret.Name = instance.Name + "-tlsca-crypto"
+	secret.Namespace = instance.Namespace
+	secret.Labels = ca.GetLabels(instance)
+
+	err := ca.Client.Create(context.TODO(), secret, controllerclient.CreateOption{
+		Owner:  instance,
+		Scheme: ca.Scheme,
+	})
+	if err != nil {
+		return errors.Wrap(err, "failed to create TLS CA crypto secret")
+	}
+
+	return nil
+}
+
+func (ca *CA) AddTLSCryptoIfMissing(instance *current.IBPCA, endpoints *current.CAEndpoints) error {
+	var err error
+	caOverrides := &cav1.ServerConfig{}
+
+	genTLSCrypto := func() error {
+		tlskey, tlscert, err := ca.GenTLSCrypto(instance, endpoints)
+		if err != nil {
+			return err
+		}
+
+		base64cert := base64.StdEncoding.EncodeToString(tlscert)
+		base64key := base64.StdEncoding.EncodeToString(tlskey)
+
+		caOverrides.TLS = cav1.ServerTLSConfig{
+			Enabled:  pointer.True(),
+			CertFile: base64cert,
+			KeyFile:  base64key,
+		}
+
+		caBytes, err := json.Marshal(caOverrides)
+		if err != nil {
+			return err
+		}
+
+		instance.Spec.ConfigOverride.CA = &runtime.RawExtension{Raw: caBytes}
+		return nil
+	}
+
+	// check for cert
+	err = ca.CheckForTLSSecret(instance)
+	if err != nil {
+		log.Info(fmt.Sprintf("No TLS crypto configured for CA '%s', generating TLS crypto...", instance.GetName()))
+		// that means secret is not found on cluster
+		if instance.Spec.ConfigOverride == nil {
+			instance.Spec.ConfigOverride = &current.ConfigOverride{}
+			err := genTLSCrypto()
+			if err != nil {
+				return err
+			}
+
+			return nil
+		}
+
+		if instance.Spec.ConfigOverride.CA == nil {
+			err := genTLSCrypto()
+			if err != nil {
+				return err
+			}
+
+			return nil
+		}
+
+		if instance.Spec.ConfigOverride != nil && instance.Spec.ConfigOverride.CA != nil {
+			err = json.Unmarshal(instance.Spec.ConfigOverride.CA.Raw, caOverrides)
+			if err != nil {
+				return err
+			}
+
+			if caOverrides.TLS.CertFile == "" {
+				err := genTLSCrypto()
+				if err != nil {
+					return err
+				}
+			}
+
+			return nil
+		}
+	}
+
+	return nil
+}
+
+func (ca *CA) GenTLSCrypto(instance *current.IBPCA, endpoints *current.CAEndpoints) ([]byte, []byte, error) {
+	priv, err := ecdsa.GenerateKey(elliptic.P256(), crand.Reader)
+	if err != nil {
+		return nil, nil, errors.Wrap(err, "failed to generate key")
+	}
+
+	serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
+	serialNumber, err := crand.Int(crand.Reader, serialNumberLimit)
+	if err != nil {
+		return nil, nil, errors.Wrap(err, "failed to generate serial number")
+	}
+
+	notBefore := time.Now()
+	notAfter := notBefore.Add(time.Hour * 87600) // valid for 10 years
+
+	template := x509.Certificate{
+		SerialNumber: serialNumber,
+		Issuer: pkix.Name{
+			Country:            []string{"US"},
+			Province:           []string{"North Carolina"},
+			Locality:           []string{"Durham"},
+			Organization:       []string{"IBM"},
+			OrganizationalUnit: []string{"Blockchain"},
+			CommonName:         endpoints.API,
+		},
+		Subject: pkix.Name{
+			Country:            []string{"US"},
+			Province:           []string{"North Carolina"},
+			Locality:           []string{"Durham"},
+			Organization:       []string{"IBM"},
+			OrganizationalUnit: []string{"Blockchain"},
+			CommonName:         endpoints.API,
+		},
+
+		NotBefore: notBefore,
+		NotAfter:  notAfter,
+	}
+
+	ip := net.ParseIP(endpoints.API)
+	if ip == nil {
+		template.DNSNames = append(template.DNSNames, endpoints.API)
+	} else {
+		template.IPAddresses = append(template.IPAddresses, ip)
+	}
+	ip = net.ParseIP(endpoints.Operations)
+	if ip == nil {
+		template.DNSNames = append(template.DNSNames, endpoints.Operations)
+	} else {
+		template.IPAddresses = append(template.IPAddresses, ip)
+	}
+
+	derBytes, err := x509.CreateCertificate(crand.Reader, &template, &template, &priv.PublicKey, priv)
+	if err != nil {
+		return nil, nil, errors.Wrap(err, "failed to create certificate")
+	}
+
+	keyBytes, err := x509.MarshalECPrivateKey(priv)
+	if err != nil {
+		return nil, nil, errors.Wrap(err, "failed to marshal key")
+	}
+
+	certPEM := &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}
+	keyPEM := &pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes}
+
+	certBytes := pem.EncodeToMemory(certPEM)
+	keyBytes = pem.EncodeToMemory(keyPEM)
+
+	return keyBytes, certBytes, nil
+}
+
+func (ca *CA) CheckForTLSSecret(instance *current.IBPCA) error {
+	secret := &corev1.Secret{}
+	err := ca.Client.Get(context.TODO(), types.NamespacedName{
+		Name:      fmt.Sprintf("%s-tlsca-crypto", instance.Name),
+		Namespace: instance.Namespace}, secret)
+	return err
+}
+
+func (ca *CA) CheckCertificates(instance *current.IBPCA) (*current.CRStatus, error) {
+	secret, err := ca.CertificateManager.GetSecret(
+		fmt.Sprintf("%s-ca-crypto", instance.GetName()),
+		instance.GetNamespace(),
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	numSecondsBeforeExpire := instance.GetNumSecondsWarningPeriod()
+	expiring, expireDate, err := ca.CertificateManager.Expires(secret.Data["tls-cert.pem"], numSecondsBeforeExpire)
+	if err != nil {
+		return nil, err
+	}
+
+	var message string
+	statusType := current.Deployed
+
+	if expiring {
+		statusType = current.Warning
+		// Check if tls cert's expiration date has already passed
+		if expireDate.Before(time.Now()) {
+			statusType = current.Error
+			message += fmt.Sprintf("TLS cert for '%s' has expired", instance.GetName())
+		} else {
+			message += fmt.Sprintf("TLS cert for '%s' expires on %s", instance.GetName(), expireDate.String())
+		}
+	}
+
+	crStatus := &current.CRStatus{
+		Type:    statusType,
+		Message: message,
+	}
+
+	switch statusType {
+	case current.Deployed:
+		crStatus.Reason = "allPodsDeployed"
+	default:
+		crStatus.Reason = "certRenewalRequired"
+	}
+
+	return crStatus, nil
+}
+
+func (ca *CA) RenewCert(instance *current.IBPCA, endpoints *current.CAEndpoints) error {
+	log.Info(fmt.Sprintf("Renewing TLS certificate for CA '%s'", instance.GetName()))
+
+	tlskey, tlscert, err := ca.GenTLSCrypto(instance, endpoints)
+	if err != nil {
+		return err
+	}
+
+	name := fmt.Sprintf("%s-ca-crypto", instance.GetName())
+	secret, err := ca.CertificateManager.GetSecret(
+		name,
+		instance.GetNamespace(),
+	)
+	if err != nil {
+		return err
+	}
+
+	secret.Data["tls-cert.pem"] = tlscert
+	secret.Data["tls-key.pem"] = tlskey
+	secret.Data["operations-cert.pem"] = tlscert
+	secret.Data["operations-key.pem"] = tlskey
+
+	if err := ca.CertificateManager.UpdateSecret(instance, name, secret.Data); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (ca *CA) GetEndpointsDNS(instance *current.IBPCA) *current.CAEndpoints {
+	return &current.CAEndpoints{
+		API:        fmt.Sprintf("%s-%s-ca.%s", instance.Namespace, instance.Name, instance.Spec.Domain),
+		Operations: fmt.Sprintf("%s-%s-operations.%s", instance.Namespace, instance.Name, instance.Spec.Domain),
+	}
+}
+
+func (ca *CA) ReconcileHSMImages(instance *current.IBPCA) bool {
+	hsmConfig, err := commonconfig.ReadHSMConfig(ca.Client, instance)
+	if err != nil {
+		return false
+	}
+
+	if hsmConfig.Library.AutoUpdateDisabled {
+		return false
+	}
+
+	updated := false
+	if hsmConfig.Library.Image != "" {
+		hsm := strings.Split(hsmConfig.Library.Image, ":")
+		image := hsm[0]
+		tag := hsm[1]
+
+		if instance.Spec.Images.HSMImage != image {
+			instance.Spec.Images.HSMImage = image
+			updated = true
+		}
+
+		if instance.Spec.Images.HSMTag != tag {
+			instance.Spec.Images.HSMTag = tag
+			updated = true
+		}
+	}
+
+	return updated
+}
+
+func (ca *CA) HandleActions(instance *current.IBPCA, update Update) error {
+	orig := instance.DeepCopy()
+
+	if update.RenewTLSCert() {
+		if err := common.BackupCACrypto(ca.Client, ca.Scheme, instance, ca.GetLabels(instance)); err != nil {
+			return errors.Wrap(err, "failed to backup crypto before renewing cert")
+		}
+
+		if err := ca.RenewCert(instance, ca.GetEndpointsDNS(instance)); err != nil {
+			log.Error(err, "Resetting action flag on failure")
+			instance.ResetTLSRenew()
+			return err
+		}
+		instance.ResetTLSRenew()
+	}
+
+	if update.RestartNeeded() {
+		if err := ca.RestartAction(instance); err != nil {
+			log.Error(err, "Resetting action flag on failure")
+			instance.ResetRestart()
+			return err
+		}
+		instance.ResetRestart()
+	}
+
+	if err := ca.Client.Patch(context.TODO(), instance, k8sclient.MergeFrom(orig)); err != nil {
+		return errors.Wrap(err, "failed to reset action flags")
+	}
+
+	return nil
+}
+
+func (ca *CA) RestartAction(instance *current.IBPCA) error {
+	log.Info("Restart triggered via action
parameter") + if err := ca.Restart.ForRestartAction(instance); err != nil { + return errors.Wrap(err, "failed to restart ca pods") + } + return nil +} + +func (ca *CA) HandleRestart(instance *current.IBPCA, update Update) error { + // If restart is disabled for components, can return immediately + if ca.Config.Operator.Restart.Disable.Components { + return nil + } + + err := ca.Restart.TriggerIfNeeded(instance) + if err != nil { + return errors.Wrap(err, "failed to restart deployment") + } + + return nil +} + +func (ca *CA) ReconcileFabricCAMigration(instance *current.IBPCA) error { + cmname := fmt.Sprintf("%s-ca-config", instance.GetName()) + cm, err := ca.Initializer.ReadConfigMap(instance, cmname) + if err != nil { + return err + } + + log.Info(fmt.Sprintf("Migrating config map '%s'", cmname)) + + serverConfig := &cav1.ServerConfig{} + err = yaml.Unmarshal(cm.BinaryData["fabric-ca-server-config.yaml"], serverConfig) + if err != nil { + return err + } + + if serverConfig.CA.ReenrollIgnoreCertExpiry == pointer.True() { + // if it is already updated no need to update configmap + return nil + } else { + serverConfig.CA.ReenrollIgnoreCertExpiry = pointer.True() + } + + caConfigBytes, err := yaml.Marshal(serverConfig) + if err != nil { + return err + } + + log.Info(fmt.Sprintf("Updating config map '%s'", cmname)) + + cm.BinaryData["fabric-ca-server-config.yaml"] = caConfigBytes + + err = ca.Initializer.CreateOrUpdateConfigMap(instance, cm.BinaryData, cmname) + if err != nil { + return err + } + return nil +} diff --git a/pkg/offering/base/ca/ca_suite_test.go b/pkg/offering/base/ca/ca_suite_test.go new file mode 100644 index 00000000..ceff7a0f --- /dev/null +++ b/pkg/offering/base/ca/ca_suite_test.go @@ -0,0 +1,46 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package baseca_test + +import ( + "net" + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestCa(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Ca Suite") +} + +var ( + ln net.Listener +) + +var _ = BeforeSuite(func() { + var err error + ln, err = net.Listen("tcp", "0.0.0.0:2345") + Expect(err).NotTo(HaveOccurred()) +}) + +var _ = AfterSuite(func() { + ln.Close() +}) diff --git a/pkg/offering/base/ca/ca_test.go b/pkg/offering/base/ca/ca_test.go new file mode 100644 index 00000000..46bcd4ef --- /dev/null +++ b/pkg/offering/base/ca/ca_test.go @@ -0,0 +1,697 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package baseca_test + +import ( + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + cmocks "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/ca/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/deployer" + initializer "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca" + commonconfig "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + managermocks "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/mocks" + baseca "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/ca" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/ca/mocks" + basecamocks "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/ca/mocks" + override "github.com/IBM-Blockchain/fabric-operator/pkg/offering/k8s/ca/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/operatorerrors" + "github.com/IBM-Blockchain/fabric-operator/version" +) + +var _ = Describe("Base CA", func() { + const ( + defaultConfigs = "../../../../defaultconfig/ca" + testdataDir = "../../../../testdata" + + keyBase64 = 
"LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb2dJQkFBS0NBUUVBdFJBUDlMemUyZEc1cm1rbmcvdVVtREFZU0VwUElqRFdUUDhqUjMxcUJ5Yjc3YWUrCnk3UTRvRnZod1lDVUhsUWVTWjFKeTdUUHpEcitoUk5hdDJYNGdGYUpGYmVFbC9DSHJ3Rk1mNzNzQStWV1pHdnkKdXhtbjB2bEdYMW5zSEo5aUdIUS9qR2FvV1FJYzlVbnpHWi8yWStlZkpxOWd3cDBNemFzWWZkdXordXVBNlp4VAp5TTdDOWFlWmxYL2ZMYmVkSXVXTzVzaXhPSlZQeUVpcWpkd0RiY1AxYy9mRCtSMm1DbmM3VGovSnVLK1poTGxPCnhGcVlFRmtROHBmSi9LY1pabVF1QURZVFh6RGp6OENxcTRTRU5ySzI0b2hQQkN2SGgyanplWjhGdGR4MmpSSFQKaXdCZWZEYWlSWVBSOUM4enk4K1Z2Wmt6S0hQV3N5aENiNUMrN1FJREFRQUJBb0lCQUZROGhzL2IxdW9Mc3BFOApCdEJXaVVsTWh0K0xBc25yWXFncnd5UU5hdmlzNEdRdXVJdFk2MGRmdCtZb2hjQ2ViZ0RkbG1tWlUxdTJ6cGJtCjdEdUt5MVFaN21rV0dpLytEWUlUM3AxSHBMZ2pTRkFzRUorUFRnN1BQamc2UTZrRlZjUCt3Vm4yb0xmWVRkU28KZE5zbEdxSmNMaVQzVHRMNzhlcjFnTTE5RzN6T3J1ZndrSGJSYU1BRmtvZ1ExUlZLSWpnVGUvbmpIMHFHNW9JagoxNEJLeFFKTUZFTG1pQk50NUx5OVMxWWdxTDRjbmNtUDN5L1QyNEdodVhNckx0eTVOeVhnS0dFZ1pUTDMzZzZvCnYreDFFMFRURWRjMVQvWVBGWkdBSXhHdWRKNWZZZ2JtWU9LZ09mUHZFOE9TbEV6OW56aHNnckVZYjdQVThpZDUKTHFycVJRRUNnWUVBNjIyT3RIUmMxaVY1ZXQxdHQydTVTTTlTS2h2b0lPT3d2Q3NnTEI5dDJzNEhRUlRYN0RXcAo0VDNpUC9leEl5OXI3bTIxNFo5MEgzZlpVNElSUkdHSUxKUVMrYzRQNVA4cHJFTDcyd1dIWlpQTTM3QlZTQ1U3CkxOTXl4TkRjeVdjSUJIVFh4NUY2eXhLNVFXWTg5MVB0eDlDamJFSEcrNVJVdDA4UVlMWDlUQTBDZ1lFQXhPSmYKcXFjeThMOVZyYUFVZG9lbGdIU0NGSkJRR3hMRFNSQlJSTkRIOUJhaWlZOCtwZzd2TExTRXFMRFpsbkZPbFkrQQpiRENEQ0RtdHhwRXViY0x6b3FnOXhlQTZ0eXZZWkNWalY5dXVzNVh1Wmk1VDBBUHhCdm56OHNNa3dRY3RQWkRQCk8zQTN4WllkZzJBRmFrV1BmT1FFbjVaK3F4TU13SG9VZ1ZwQkptRUNnWUJ2Q2FjcTJVOEgrWGpJU0ROOU5TT1kKZ1ovaEdIUnRQcmFXcVVodFJ3MkxDMjFFZHM0NExEOUphdVNSQXdQYThuelhZWXROTk9XU0NmYkllaW9tdEZHRApwUHNtTXRnd1MyQ2VUS0Y0OWF5Y2JnOU0yVi8vdlAraDdxS2RUVjAwNkpGUmVNSms3K3FZYU9aVFFDTTFDN0swCmNXVUNwQ3R6Y014Y0FNQmF2THNRNlFLQmdHbXJMYmxEdjUxaXM3TmFKV0Z3Y0MwL1dzbDZvdVBFOERiNG9RV1UKSUowcXdOV2ZvZm95TGNBS3F1QjIrbkU2SXZrMmFiQ25ZTXc3V0w4b0VJa3NodUtYOVgrTVZ6Y1VPekdVdDNyaQpGeU9mcHJJRXowcm5zcWNSNUJJNUZqTGJqVFpyMEMyUWp2NW5FVFAvaHlpQWFRQ1l5THAyWlVtZ0Vjb0VPNWtwClBhcEJBb0dBZVV0WjE0SVp2cVorQnAxR1VqSG9PR0pQVnlJdzhSRUFETjRhZXRJTUlQRWFVaDdjZUtWdVN6VXMKci9WczA1Zjg0cFBVaStuUTUzaGo2ZFhhYTd1UE1aMFBnNFY4cS9UdzJMZ3BWWndVd0ltZUQrcXNsbldha3VWMQpMSnp3SkhOa3pOWE1OMmJWREFZTndSamNRSmhtbzF0V2xHYlpRQjNoSkEwR2thWGZPa2c9Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==" + certBase64 = 
"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURBekNDQWV1Z0F3SUJBZ0lKQU9xQ1VmaFNjcWtlTUEwR0NTcUdTSWIzRFFFQkJRVUFNQmd4RmpBVUJnTlYKQkFNTURYQnZjM1JuY21WekxuUmxjM1F3SGhjTk1Ua3dOekl6TVRrd09UVTRXaGNOTWprd056SXdNVGt3T1RVNApXakFZTVJZd0ZBWURWUVFEREExd2IzTjBaM0psY3k1MFpYTjBNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DCkFROEFNSUlCQ2dLQ0FRRUF0UkFQOUx6ZTJkRzVybWtuZy91VW1EQVlTRXBQSWpEV1RQOGpSMzFxQnliNzdhZSsKeTdRNG9Gdmh3WUNVSGxRZVNaMUp5N1RQekRyK2hSTmF0Mlg0Z0ZhSkZiZUVsL0NIcndGTWY3M3NBK1ZXWkd2eQp1eG1uMHZsR1gxbnNISjlpR0hRL2pHYW9XUUljOVVuekdaLzJZK2VmSnE5Z3dwME16YXNZZmR1eit1dUE2WnhUCnlNN0M5YWVabFgvZkxiZWRJdVdPNXNpeE9KVlB5RWlxamR3RGJjUDFjL2ZEK1IybUNuYzdUai9KdUsrWmhMbE8KeEZxWUVGa1E4cGZKL0tjWlptUXVBRFlUWHpEano4Q3FxNFNFTnJLMjRvaFBCQ3ZIaDJqemVaOEZ0ZHgyalJIVAppd0JlZkRhaVJZUFI5Qzh6eTgrVnZaa3pLSFBXc3loQ2I1Qys3UUlEQVFBQm8xQXdUakFkQmdOVkhRNEVGZ1FVCi9mZ01BcExIMXBvcFFoS25KTmgrVk04QUtQZ3dId1lEVlIwakJCZ3dGb0FVL2ZnTUFwTEgxcG9wUWhLbkpOaCsKVk04QUtQZ3dEQVlEVlIwVEJBVXdBd0VCL3pBTkJna3Foa2lHOXcwQkFRVUZBQU9DQVFFQURjOUc4M05LaWw3ZQpoVFlvR1piejhFV1o4c0puVnY4azMwRDlydUY1OXFvT0ppZGorQUhNbzNHOWtud1lvbGFGbmJwb093cElOZ3g1CnYvL21aU3VldlFMZUZKRlN1UjBheVQ1WFYxcjljNUZGQ2JSaEp0cE4rOEdTT29tRUFSYTNBVGVFSG5WeVpaYkMKWkFQQUxMVXlVeUVrSDR3Q0RZUGtYa3dWQVVlR2FGVmNqZWR0eGJ3Z2k0dG0rSFZoTEt5Y0NoZ25YUVhxQ2srTwo2RHJIc0Z0STVTNWQvQlBPbE1Yc28vNUFielBGelpVVVg4OEhkVUhWSWlqM0luMXdUbWhtREtwdzZ6dmcvNjIxCjRhcGhDOWJ2bXAxeUVOUklzb0xiMGlMWVAzRSswU0ZkZC9IRnRhVXV3eUx6cnl4R2xrdG1BVUJWNVdYZEQxMkIKTU1mQnhvNFVYUT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K" + ) + + AfterEach(func() { + err := os.RemoveAll("shared") + Expect(err).NotTo(HaveOccurred()) + }) + + var ( + ca *baseca.CA + instance *current.IBPCA + mockKubeClient *cmocks.Client + + deploymentMgr *managermocks.ResourceManager + serviceMgr *managermocks.ResourceManager + pvcMgr *managermocks.ResourceManager + roleMgr *managermocks.ResourceManager + roleBindingMgr *managermocks.ResourceManager + serviceAccountMgr *managermocks.ResourceManager + + initMock *basecamocks.InitializeIBPCA + update *mocks.Update + restartMgr *basecamocks.RestartManager + certMgr *basecamocks.CertificateManager + ) + + BeforeEach(func() { + mockKubeClient = &cmocks.Client{} + update = &mocks.Update{} + + replicas := int32(1) + instance = ¤t.IBPCA{ + Status: current.IBPCAStatus{ + CRStatus: current.CRStatus{ + Version: version.Operator, + }, + }, + Spec: current.IBPCASpec{ + Domain: "domain", + HSM: ¤t.HSM{ + PKCS11Endpoint: "tcp://0.0.0.0:2345", + }, + Images: ¤t.CAImages{ + CAImage: "caimage", + CATag: "2.0.0", + CAInitImage: "cainitimage", + CAInitTag: "2.0.0", + }, + Replicas: &replicas, + FabricVersion: "1.4.9-0", + }, + } + instance.Kind = "IBPCA" + instance.Name = "ca1" + instance.Namespace = "test" + + mockKubeClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj client.Object) error { + switch obj.(type) { + case *corev1.Secret: + o := obj.(*corev1.Secret) + switch types.Name { + case instance.Name + "-ca-crypto": + o.Name = instance.Name + "-ca-crypto" + o.Namespace = instance.Namespace + o.Data = map[string][]byte{ + "tls-cert.pem": []byte(certBase64), + "cert.pem": []byte(certBase64), + "operations-cert.pem": []byte(certBase64), + } + case instance.Name + "-tlsca-crypto": + o.Name = instance.Name + "-tlsca-crypto" + o.Namespace = instance.Namespace + o.Data = map[string][]byte{ + "cert.pem": []byte(certBase64), + } + } + + } + return nil + } + + deploymentMgr = &managermocks.ResourceManager{} + serviceMgr = &managermocks.ResourceManager{} + pvcMgr = &managermocks.ResourceManager{} + roleMgr = &managermocks.ResourceManager{} + 
roleBindingMgr = &managermocks.ResourceManager{} + serviceAccountMgr = &managermocks.ResourceManager{} + initMock = &basecamocks.InitializeIBPCA{} + restartMgr = &basecamocks.RestartManager{} + certMgr = &basecamocks.CertificateManager{} + + initMock.SyncDBConfigReturns(instance, nil) + + config := &config.Config{ + CAInitConfig: &initializer.Config{ + CADefaultConfigPath: filepath.Join(defaultConfigs, "/ca.yaml"), + CAOverrideConfigPath: filepath.Join(testdataDir, "init/override.yaml"), + TLSCADefaultConfigPath: filepath.Join(defaultConfigs, "tlsca.yaml"), + TLSCAOverrideConfigPath: filepath.Join(testdataDir, "init/override.yaml"), + SharedPath: "shared", + }, + Operator: config.Operator{ + Versions: &deployer.Versions{ + CA: map[string]deployer.VersionCA{ + "1.4.9-0": { + Default: true, + Image: deployer.CAImages{ + CAImage: "caimage", + CATag: "1.4.9", + CAInitImage: "cainitimage", + CAInitTag: "1.4.9", + }, + }, + }, + }, + }, + } + + deploymentMgr.ExistsReturns(true) + ca = &baseca.CA{ + DeploymentManager: deploymentMgr, + ServiceManager: serviceMgr, + PVCManager: pvcMgr, + RoleManager: roleMgr, + RoleBindingManager: roleBindingMgr, + ServiceAccountManager: serviceAccountMgr, + Client: mockKubeClient, + Scheme: &runtime.Scheme{}, + Override: &override.Override{}, + Config: config, + Initializer: initMock, + Restart: restartMgr, + CertificateManager: certMgr, + } + }) + + Context("Reconciles", func() { + It("requeues request and returns nil if instance version is updated", func() { + instance.Status.CRStatus.Version = "" + _, err := ca.Reconcile(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(mockKubeClient.PatchStatusCallCount()).To(Equal(1)) + }) + + It("returns a breaking error if initialization fails", func() { + initMock.HandleEnrollmentCAInitReturns(nil, errors.New("failed to init")) + _, err := ca.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("Code: 20 - failed to initialize ca: failed to init")) + Expect(operatorerrors.IsBreakingError(err, "msg", nil)).NotTo(HaveOccurred()) + }) + + It("returns an error for invalid HSM endpoint", func() { + instance.Spec.HSM.PKCS11Endpoint = "tcp://:2345" + _, err := ca.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(fmt.Sprintf("failed pre reconcile checks: invalid HSM endpoint for ca instance '%s': missing IP address", instance.Name))) + }) + + It("returns an error domain is not set", func() { + instance.Spec.Domain = "" + _, err := ca.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(fmt.Sprintf("failed pre reconcile checks: domain not set for ca instance '%s'", instance.Name))) + }) + + It("returns an error if pvc manager fails to reconcile", func() { + pvcMgr.ReconcileReturns(errors.New("failed to reconcile pvc")) + _, err := ca.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed PVC reconciliation: failed to reconcile pvc")) + }) + + It("returns an error if service manager fails to reconcile", func() { + serviceMgr.ReconcileReturns(errors.New("failed to reconcile service")) + _, err := ca.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed Service reconciliation: failed to reconcile service")) + }) + + It("returns an error if role manager fails to reconcile", func() { + roleMgr.ReconcileReturns(errors.New("failed to reconcile 
role")) + _, err := ca.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to reconcile role")) + }) + + It("returns an error if role binding manager fails to reconcile", func() { + roleBindingMgr.ReconcileReturns(errors.New("failed to reconcile role binding")) + _, err := ca.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to reconcile role binding")) + }) + + It("returns an error if service account manager fails to reconcile", func() { + serviceAccountMgr.ReconcileReturns(errors.New("failed to reconcile service account")) + _, err := ca.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to reconcile service account")) + }) + + It("returns an error if deployment manager fails to reconcile", func() { + deploymentMgr.ReconcileReturns(errors.New("failed to reconcile deployment")) + _, err := ca.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed Deployment reconciliation: failed to reconcile deployment")) + }) + + It("returns an error if restart fails", func() { + update.RestartNeededReturns(true) + mockKubeClient.PatchReturns(errors.New("patch failed")) + _, err := ca.Reconcile(instance, update) + Expect(err).Should(MatchError(ContainSubstring("patch failed"))) + }) + + It("reconciles IBPCA", func() { + _, err := ca.Reconcile(instance, update) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("initialize", func() { + It("returns an error if enrollment ca init fails", func() { + msg := "failed to init enrollment ca" + initMock.HandleEnrollmentCAInitReturns(nil, errors.New(msg)) + err := ca.Initialize(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(msg)) + }) + + It("returns an error if unable to create create config resources for enrollment ca", func() { + msg := "failed to create config resources for enrollment ca" + initMock.HandleEnrollmentCAInitReturns(&initializer.Response{}, nil) + initMock.HandleConfigResourcesReturns(errors.New(msg)) + err := ca.Initialize(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(msg)) + }) + + It("returns an error if tls ca init fails", func() { + msg := "failed to init tls ca" + initMock.HandleTLSCAInitReturns(nil, errors.New(msg)) + err := ca.Initialize(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(msg)) + }) + + It("returns an error if unable to create create config resources for tls ca", func() { + msg := "failed to create config resources for tls ca" + initMock.HandleEnrollmentCAInitReturns(&initializer.Response{Config: &v1.ServerConfig{}}, nil) + initMock.HandleTLSCAInitReturns(&initializer.Response{Config: &v1.ServerConfig{}}, nil) + initMock.HandleConfigResourcesReturnsOnCall(1, errors.New(msg)) + err := ca.Initialize(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(msg)) + }) + + It("triggers deployment restart if deployment exists and overrides update detected", func() { + deploymentMgr.ExistsReturns(true) + update.ConfigOverridesUpdatedReturns(true) + + err := ca.Initialize(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(restartMgr.ForConfigOverrideCallCount()).To(Equal(1)) + }) + }) + + Context("AddTLSCryptoIfMissing", func() { + It("adds tls crypto", func() { + mockKubeClient.GetReturns(errors.New("fake error")) + err := 
ca.AddTLSCryptoIfMissing(instance, ¤t.CAEndpoints{}) + Expect(err).NotTo(HaveOccurred()) + + caOverrides := &v1.ServerConfig{} + err = json.Unmarshal(instance.Spec.ConfigOverride.CA.Raw, caOverrides) + Expect(err).NotTo(HaveOccurred()) + + Expect(caOverrides.TLS.CertFile).NotTo(Equal("")) + Expect(caOverrides.TLS.KeyFile).NotTo(Equal("")) + }) + }) + + Context("image overrides", func() { + var images *current.CAImages + + Context("using registry url", func() { + BeforeEach(func() { + images = ¤t.CAImages{ + CAImage: "caimage", + CATag: "2.0.0", + CAInitImage: "cainitimage", + CAInitTag: "2.0.0", + } + }) + + It("overrides images based with registry url and does not append more value on each call", func() { + images.Override(images, "ghcr.io/ibm-blockchain/", "amd64") + Expect(images.CAImage).To(Equal("ghcr.io/ibm-blockchain/caimage")) + Expect(images.CATag).To(Equal("2.0.0")) + Expect(images.CAInitImage).To(Equal("ghcr.io/ibm-blockchain/cainitimage")) + Expect(images.CAInitTag).To(Equal("2.0.0")) + }) + + It("overrides images based with registry url and does not append more value on each call", func() { + images.Override(images, "ghcr.io/ibm-blockchain/images/", "s390x") + Expect(images.CAImage).To(Equal("ghcr.io/ibm-blockchain/images/caimage")) + Expect(images.CATag).To(Equal("2.0.0")) + Expect(images.CAInitImage).To(Equal("ghcr.io/ibm-blockchain/images/cainitimage")) + Expect(images.CAInitTag).To(Equal("2.0.0")) + + }) + }) + + Context("using fully qualified path", func() { + BeforeEach(func() { + images = ¤t.CAImages{ + CAImage: "ghcr.io/ibm-blockchain/caimage", + CATag: "2.0.0", + CAInitImage: "ghcr.io/ibm-blockchain/cainitimage", + CAInitTag: "2.0.0", + } + }) + + It("keeps images and adds arch to tag", func() { + images.Override(images, "", "s390") + Expect(images.CAImage).To(Equal("ghcr.io/ibm-blockchain/caimage")) + Expect(images.CATag).To(Equal("2.0.0")) + Expect(images.CAInitImage).To(Equal("ghcr.io/ibm-blockchain/cainitimage")) + Expect(images.CAInitTag).To(Equal("2.0.0")) + }) + }) + }) + + Context("pre reconcile checks", func() { + Context("version and images", func() { + Context("create CR", func() { + It("returns an error if fabric version is not set in spec", func() { + instance.Spec.FabricVersion = "" + _, err := ca.PreReconcileChecks(instance, update) + Expect(err).To(MatchError(ContainSubstring("fabric version is not set"))) + }) + + Context("images section blank", func() { + BeforeEach(func() { + instance.Spec.Images = nil + }) + + It("normalizes fabric version and requests a requeue", func() { + instance.Spec.FabricVersion = "1.4.9" + requeue, err := ca.PreReconcileChecks(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(requeue).To(Equal(true)) + Expect(instance.Spec.FabricVersion).To(Equal("1.4.9-0")) + }) + + It("returns an error if fabric version not supported", func() { + instance.Spec.FabricVersion = "0.0.1" + _, err := ca.PreReconcileChecks(instance, update) + Expect(err).To(MatchError(ContainSubstring("fabric version '0.0.1' is not supported"))) + }) + + When("version is passed without hyphen", func() { + BeforeEach(func() { + instance.Spec.FabricVersion = "1.4.9" + }) + + It("finds default version for release and updates images section", func() { + requeue, err := ca.PreReconcileChecks(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(requeue).To(Equal(true)) + Expect(*instance.Spec.Images).To(Equal(current.CAImages{ + CAImage: "caimage", + CATag: "1.4.9", + CAInitImage: "cainitimage", + CAInitTag: "1.4.9", + })) + }) + }) + + 
When("version is passed with hyphen", func() { + BeforeEach(func() { + instance.Spec.FabricVersion = "1.4.9-0" + }) + + It("looks images and updates images section", func() { + requeue, err := ca.PreReconcileChecks(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(requeue).To(Equal(true)) + Expect(*instance.Spec.Images).To(Equal(current.CAImages{ + CAImage: "caimage", + CATag: "1.4.9", + CAInitImage: "cainitimage", + CAInitTag: "1.4.9", + })) + }) + }) + }) + + Context("images section passed", func() { + BeforeEach(func() { + instance.Spec.Images = ¤t.CAImages{ + CAImage: "ghcr.io/ibm-blockchain/caimage", + CATag: "2.0.0", + CAInitImage: "ghcr.io/ibm-blockchain/cainitimage", + CAInitTag: "2.0.0", + } + }) + + When("version is not passed", func() { + BeforeEach(func() { + instance.Spec.FabricVersion = "" + }) + + It("returns an error", func() { + _, err := ca.PreReconcileChecks(instance, update) + Expect(err).To(MatchError(ContainSubstring("fabric version is not set"))) + }) + }) + + When("version is passed", func() { + BeforeEach(func() { + instance.Spec.FabricVersion = "2.0.0-8" + }) + + It("persists current spec configuration", func() { + requeue, err := ca.PreReconcileChecks(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(requeue).To(Equal(false)) + Expect(instance.Spec.FabricVersion).To(Equal("2.0.0-8")) + Expect(*instance.Spec.Images).To(Equal(current.CAImages{ + CAImage: "ghcr.io/ibm-blockchain/caimage", + CATag: "2.0.0", + CAInitImage: "ghcr.io/ibm-blockchain/cainitimage", + CAInitTag: "2.0.0", + })) + }) + }) + }) + }) + + Context("update CR", func() { + BeforeEach(func() { + instance.Spec.FabricVersion = "2.0.1-0" + instance.Spec.Images = ¤t.CAImages{ + CAImage: "ghcr.io/ibm-blockchain/caimage", + CATag: "2.0.1", + CAInitImage: "ghcr.io/ibm-blockchain/cainitimage", + CAInitTag: "2.0.1", + } + }) + + When("images updated", func() { + BeforeEach(func() { + update.ImagesUpdatedReturns(true) + instance.Spec.Images = ¤t.CAImages{ + CAImage: "ghcr.io/ibm-blockchain/caimage", + CATag: "2.0.8", + CAInitImage: "ghcr.io/ibm-blockchain/cainitimage", + CAInitTag: "2.0.8", + } + }) + + Context("and version updated", func() { + BeforeEach(func() { + update.FabricVersionUpdatedReturns(true) + instance.Spec.FabricVersion = "2.0.1-8" + }) + + It("persists current spec configuration", func() { + requeue, err := ca.PreReconcileChecks(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(requeue).To(Equal(false)) + Expect(instance.Spec.FabricVersion).To(Equal("2.0.1-8")) + Expect(*instance.Spec.Images).To(Equal(current.CAImages{ + CAImage: "ghcr.io/ibm-blockchain/caimage", + CATag: "2.0.8", + CAInitImage: "ghcr.io/ibm-blockchain/cainitimage", + CAInitTag: "2.0.8", + })) + }) + }) + + Context("and version not updated", func() { + It("persists current spec configuration", func() { + requeue, err := ca.PreReconcileChecks(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(requeue).To(Equal(false)) + Expect(instance.Spec.FabricVersion).To(Equal("2.0.1-0")) + Expect(*instance.Spec.Images).To(Equal(current.CAImages{ + CAImage: "ghcr.io/ibm-blockchain/caimage", + CATag: "2.0.8", + CAInitImage: "ghcr.io/ibm-blockchain/cainitimage", + CAInitTag: "2.0.8", + })) + }) + }) + }) + + When("images not updated", func() { + Context("and version updated during operator migration", func() { + BeforeEach(func() { + update.FabricVersionUpdatedReturns(true) + instance.Spec.FabricVersion = "unsupported" + }) + + It("persists current spec configuration", func() { + requeue, 
err := ca.PreReconcileChecks(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(requeue).To(Equal(false)) + Expect(instance.Spec.FabricVersion).To(Equal("unsupported")) + Expect(*instance.Spec.Images).To(Equal(current.CAImages{ + CAImage: "ghcr.io/ibm-blockchain/caimage", + CATag: "2.0.1", + CAInitImage: "ghcr.io/ibm-blockchain/cainitimage", + CAInitTag: "2.0.1", + })) + }) + }) + + Context("and version updated (not during operator migration)", func() { + BeforeEach(func() { + update.FabricVersionUpdatedReturns(true) + }) + + When("using non-hyphenated version", func() { + BeforeEach(func() { + instance.Spec.FabricVersion = "1.4.9" + }) + + It("looks images and updates images section", func() { + requeue, err := ca.PreReconcileChecks(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(requeue).To(Equal(true)) + Expect(instance.Spec.FabricVersion).To(Equal("1.4.9-0")) + Expect(*instance.Spec.Images).To(Equal(current.CAImages{ + CAImage: "caimage", + CATag: "1.4.9", + CAInitImage: "cainitimage", + CAInitTag: "1.4.9", + })) + }) + }) + + When("using hyphenated version", func() { + BeforeEach(func() { + instance.Spec.FabricVersion = "1.4.9-0" + }) + + It("looks images and updates images section", func() { + requeue, err := ca.PreReconcileChecks(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(requeue).To(Equal(true)) + Expect(instance.Spec.FabricVersion).To(Equal("1.4.9-0")) + Expect(*instance.Spec.Images).To(Equal(current.CAImages{ + CAImage: "caimage", + CATag: "1.4.9", + CAInitImage: "cainitimage", + CAInitTag: "1.4.9", + })) + }) + }) + }) + }) + }) + }) + + Context("hsm image updates", func() { + var ( + hsmConfig = &commonconfig.HSMConfig{ + Library: commonconfig.Library{ + Image: "ghcr.io/ibm-blockchain/hsmimage:1.0.0", + }, + } + ) + + BeforeEach(func() { + mockKubeClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj client.Object) error { + switch obj.(type) { + case *corev1.ConfigMap: + o := obj.(*corev1.ConfigMap) + + bytes, err := yaml.Marshal(hsmConfig) + Expect(err).NotTo(HaveOccurred()) + + o.Data = map[string]string{ + "ibp-hsm-config.yaml": string(bytes), + } + } + return nil + } + }) + + It("updates hsm image and tag if passed through operator config", func() { + updated, err := ca.PreReconcileChecks(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(updated).To(Equal(true)) + Expect(instance.Spec.Images.HSMImage).To(Equal("ghcr.io/ibm-blockchain/hsmimage")) + Expect(instance.Spec.Images.HSMTag).To(Equal("1.0.0")) + }) + + It("doesn't update hsm image and tag if hsm update is disabled", func() { + hsmConfig.Library.AutoUpdateDisabled = true + + updated, err := ca.PreReconcileChecks(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(updated).To(Equal(false)) + Expect(instance.Spec.Images.HSMImage).To(Equal("")) + Expect(instance.Spec.Images.HSMTag).To(Equal("")) + }) + }) + }) + + Context("update connection profile", func() { + It("returns error if fails to get cert", func() { + mockKubeClient.GetReturns(errors.New("get error")) + err := ca.UpdateConnectionProfile(instance) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("get error")) + }) + + It("updates connection profile cm", func() { + err := ca.UpdateConnectionProfile(instance) + Expect(err).NotTo(HaveOccurred()) + Expect(mockKubeClient.GetCallCount()).To(Equal(3)) + + _, obj, _ := mockKubeClient.UpdateArgsForCall(0) + configmap := obj.(*corev1.ConfigMap) + connectionprofile := ¤t.CAConnectionProfile{} + err = 
json.Unmarshal(configmap.BinaryData["profile.json"], connectionprofile) + Expect(err).NotTo(HaveOccurred()) + + certEncoded := base64.StdEncoding.EncodeToString([]byte(certBase64)) + Expect(connectionprofile.TLS.Cert).To(Equal(certEncoded)) + Expect(connectionprofile.CA.SignCerts).To(Equal(certEncoded)) + Expect(connectionprofile.TLSCA.SignCerts).To(Equal(certEncoded)) + }) + }) +}) diff --git a/pkg/offering/base/ca/initialize.go b/pkg/offering/base/ca/initialize.go new file mode 100644 index 00000000..b9d6f1d0 --- /dev/null +++ b/pkg/offering/base/ca/initialize.go @@ -0,0 +1,517 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package baseca + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "path/filepath" + + "github.com/pkg/errors" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + cav1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/ca/v1" + initializer "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca/config" + caconfig "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca/config" + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + + "sigs.k8s.io/yaml" +) + +//go:generate counterfeiter -o mocks/initializer.go -fake-name Initializer . 
Initializer + +type Initializer interface { + Create(*current.IBPCA, *cav1.ServerConfig, initializer.IBPCA) (*initializer.Response, error) + Update(*current.IBPCA, *cav1.ServerConfig, initializer.IBPCA) (*initializer.Response, error) +} + +type Initialize struct { + Config *initializer.Config + Scheme *runtime.Scheme + Labels func(instance v1.Object) map[string]string + + Initializer Initializer + Client k8sclient.Client +} + +func NewInitializer(config *initializer.Config, scheme *runtime.Scheme, client k8sclient.Client, labels func(instance v1.Object) map[string]string, timeouts initializer.HSMInitJobTimeouts) *Initialize { + return &Initialize{ + Config: config, + Initializer: &initializer.Initializer{Client: client, Timeouts: timeouts}, + Scheme: scheme, + Client: client, + Labels: labels, + } +} + +func (i *Initialize) HandleEnrollmentCAInit(instance *current.IBPCA, update Update) (*initializer.Response, error) { + var err error + var resp *initializer.Response + + log.Info(fmt.Sprintf("Checking if enrollment CA '%s' needs initialization", instance.GetName())) + + if i.SecretExists(instance, fmt.Sprintf("%s-ca-crypto", instance.GetName())) { + if update.CAOverridesUpdated() { + resp, err = i.UpdateEnrollmentCAConfig(instance) + if err != nil { + return nil, err + } + } + } else { + resp, err = i.CreateEnrollmentCAConfig(instance) + if err != nil { + return nil, err + } + + } + + return resp, nil +} + +func (i *Initialize) HandleTLSCAInit(instance *current.IBPCA, update Update) (*initializer.Response, error) { + var err error + var resp *initializer.Response + + log.Info(fmt.Sprintf("Checking if TLS CA '%s' needs initialization", instance.GetName())) + + if i.SecretExists(instance, fmt.Sprintf("%s-tlsca-crypto", instance.GetName())) { + if update.TLSCAOverridesUpdated() { + resp, err = i.UpdateTLSCAConfig(instance) + if err != nil { + return nil, err + } + } + } else { + resp, err = i.CreateTLSCAConfig(instance) + if err != nil { + return nil, err + } + } + + return resp, nil +} + +func (i *Initialize) CreateEnrollmentCAConfig(instance *current.IBPCA) (*initializer.Response, error) { + log.Info(fmt.Sprintf("Creating Enrollment CA config '%s'", instance.GetName())) + bytes, err := ioutil.ReadFile(i.Config.CADefaultConfigPath) + if err != nil { + return nil, err + } + + sca, err := i.GetEnrollmentInitCA(instance, bytes) + if err != nil { + return nil, err + } + + var caOverrides *cav1.ServerConfig + if instance.Spec.ConfigOverride != nil && instance.Spec.ConfigOverride.CA != nil { + caOverrides = &cav1.ServerConfig{} + err = json.Unmarshal(instance.Spec.ConfigOverride.CA.Raw, caOverrides) + if err != nil { + return nil, err + } + } + + resp, err := i.Initializer.Create(instance, caOverrides, sca) + if err != nil { + return nil, err + } + + return resp, nil +} + +func (i *Initialize) UpdateEnrollmentCAConfig(instance *current.IBPCA) (*initializer.Response, error) { + log.Info(fmt.Sprintf("Updating Enrollment CA config '%s'", instance.GetName())) + cmname := fmt.Sprintf("%s-ca-config", instance.GetName()) + cm, err := i.ReadConfigMap(instance, cmname) + if err != nil { + return nil, err + } + + sca, err := i.GetEnrollmentInitCA(instance, cm.BinaryData["fabric-ca-server-config.yaml"]) + if err != nil { + return nil, err + } + + var caOverrides *cav1.ServerConfig + if instance.Spec.ConfigOverride != nil && instance.Spec.ConfigOverride.CA != nil { + caOverrides = &cav1.ServerConfig{} + err = json.Unmarshal(instance.Spec.ConfigOverride.CA.Raw, caOverrides) + if err != nil { + return nil, err + 
} + } + + resp, err := i.Initializer.Update(instance, caOverrides, sca) + if err != nil { + return nil, err + } + + return resp, nil +} + +func (i *Initialize) GetEnrollmentInitCA(instance *current.IBPCA, data []byte) (*initializer.CA, error) { + serverConfig := &cav1.ServerConfig{} + err := yaml.Unmarshal(data, serverConfig) + if err != nil { + return nil, err + } + + initCAConfig := &caconfig.Config{ + ServerConfig: serverConfig, + HomeDir: filepath.Join(i.Config.SharedPath, instance.GetName(), "ca"), + MountPath: "/crypto/ca", + SqlitePath: instance.Spec.CustomNames.Sqlite, + } + + cn := instance.GetName() + "-ca" + if instance.Spec.ConfigOverride != nil && instance.Spec.ConfigOverride.CA != nil { + configOverride, err := config.ReadFrom(&instance.Spec.ConfigOverride.CA.Raw) + if err != nil { + return nil, err + } + if configOverride.ServerConfig.CSR.CN != "" { + cn = configOverride.ServerConfig.CSR.CN + } + } + + sca := initializer.NewCA(initCAConfig, caconfig.EnrollmentCA, i.Config.SharedPath, instance.UsingHSMProxy(), cn) + + return sca, nil +} + +func (i *Initialize) CreateTLSCAConfig(instance *current.IBPCA) (*initializer.Response, error) { + log.Info(fmt.Sprintf("Creating TLS CA config '%s'", instance.GetName())) + bytes, err := ioutil.ReadFile(i.Config.TLSCADefaultConfigPath) + if err != nil { + return nil, err + } + + sca, err := i.GetTLSInitCA(instance, bytes) + if err != nil { + return nil, err + } + + var tlscaOverrides *cav1.ServerConfig + if instance.Spec.ConfigOverride != nil && instance.Spec.ConfigOverride.TLSCA != nil { + tlscaOverrides = &cav1.ServerConfig{} + err = json.Unmarshal(instance.Spec.ConfigOverride.TLSCA.Raw, tlscaOverrides) + if err != nil { + return nil, err + } + } + + resp, err := i.Initializer.Create(instance, tlscaOverrides, sca) + if err != nil { + return nil, err + } + + return resp, nil +} + +func (i *Initialize) UpdateTLSCAConfig(instance *current.IBPCA) (*initializer.Response, error) { + log.Info(fmt.Sprintf("Updating TLSCA config '%s'", instance.GetName())) + cmname := fmt.Sprintf("%s-tlsca-config", instance.GetName()) + cm, err := i.ReadConfigMap(instance, cmname) + if err != nil { + return nil, err + } + + tca, err := i.GetTLSInitCA(instance, cm.BinaryData["fabric-ca-server-config.yaml"]) + if err != nil { + return nil, err + } + + var tlscaOverrides *cav1.ServerConfig + if instance.Spec.ConfigOverride != nil && instance.Spec.ConfigOverride.TLSCA != nil { + tlscaOverrides = &cav1.ServerConfig{} + err = json.Unmarshal(instance.Spec.ConfigOverride.TLSCA.Raw, tlscaOverrides) + if err != nil { + return nil, err + } + } + + resp, err := i.Initializer.Update(instance, tlscaOverrides, tca) + if err != nil { + return nil, err + } + + return resp, nil +} + +func (i *Initialize) GetTLSInitCA(instance *current.IBPCA, data []byte) (*initializer.CA, error) { + serverConfig := &cav1.ServerConfig{} + err := yaml.Unmarshal(data, serverConfig) + if err != nil { + return nil, err + } + + initCAConfig := &caconfig.Config{ + ServerConfig: serverConfig, + HomeDir: filepath.Join(i.Config.SharedPath, instance.GetName(), "tlsca"), + MountPath: "/crypto/tlsca", + SqlitePath: instance.Spec.CustomNames.Sqlite, + } + + cn := instance.GetName() + "-tlsca" + if instance.Spec.ConfigOverride != nil && instance.Spec.ConfigOverride.TLSCA != nil { + configOverride, err := config.ReadFrom(&instance.Spec.ConfigOverride.TLSCA.Raw) + if err != nil { + return nil, err + } + if configOverride.ServerConfig.CSR.CN != "" { + cn = configOverride.ServerConfig.CSR.CN + } + } + + tca := 
initializer.NewCA(initCAConfig, caconfig.TLSCA, i.Config.SharedPath, instance.UsingHSMProxy(), cn) + + return tca, nil +} + +func (i *Initialize) HandleConfigResources(name string, instance *current.IBPCA, resp *initializer.Response, update Update) error { + var err error + + if update.CAOverridesUpdated() || update.TLSCAOverridesUpdated() { + log.Info(fmt.Sprintf("Updating config resources for '%s'", name)) + err = i.UpdateConfigResources(name, instance, resp) + if err != nil { + return err + } + } else { + log.Info(fmt.Sprintf("Creating config resources for '%s'", name)) + err = i.CreateConfigResources(name, instance, resp) + if err != nil { + return err + } + } + + return nil +} + +func (i *Initialize) UpdateConfigResources(name string, instance *current.IBPCA, resp *initializer.Response) error { + var err error + + secretName := fmt.Sprintf("%s-crypto", name) + secret, err := i.GetCryptoSecret(instance, secretName) + if err != nil { + return err + } + + mergedCrypto := i.MergeCryptoMaterial(secret.Data, resp.CryptoMap) + + mergedResp := &initializer.Response{ + CryptoMap: mergedCrypto, + Config: resp.Config, + } + + err = i.CreateConfigResources(name, instance, mergedResp) + if err != nil { + return err + } + + return nil +} + +func (i *Initialize) CreateConfigResources(name string, instance *current.IBPCA, resp *initializer.Response) error { + var err error + + if len(resp.CryptoMap) > 0 { + secretName := fmt.Sprintf("%s-crypto", name) + err = i.CreateOrUpdateCryptoSecret(instance, resp.CryptoMap, secretName) + if err != nil { + return err + } + } + + if resp.Config != nil { + bytes, err := ConfigToBytes(resp.Config) + if err != nil { + return err + } + + data := map[string][]byte{ + "fabric-ca-server-config.yaml": bytes, + } + cmName := fmt.Sprintf("%s-config", name) + err = i.CreateOrUpdateConfigMap(instance, data, cmName) + if err != nil { + return err + } + } + + return nil +} + +func (i *Initialize) ReadConfigMap(instance *current.IBPCA, name string) (*corev1.ConfigMap, error) { + n := types.NamespacedName{ + Name: name, + Namespace: instance.GetNamespace(), + } + + cm := &corev1.ConfigMap{} + err := i.Client.Get(context.TODO(), n, cm) + if err != nil { + return nil, errors.Wrap(err, "failed to get config map") + } + + return cm, nil +} + +func (i *Initialize) CreateOrUpdateConfigMap(instance *current.IBPCA, data map[string][]byte, name string) error { + cm := &corev1.ConfigMap{ + ObjectMeta: v1.ObjectMeta{ + Name: name, + Namespace: instance.GetNamespace(), + Labels: i.Labels(instance), + }, + BinaryData: data, + } + + err := i.Client.CreateOrUpdate(context.TODO(), cm, k8sclient.CreateOrUpdateOption{ + Owner: instance, + Scheme: i.Scheme, + }) + if err != nil { + return errors.Wrap(err, "failed to create/update config map") + } + + return nil +} + +func (i *Initialize) CreateOrUpdateCryptoSecret(instance *current.IBPCA, caCrypto map[string][]byte, name string) error { + secret := &corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Name: name, + Namespace: instance.Namespace, + Labels: i.Labels(instance), + }, + Data: caCrypto, + Type: corev1.SecretTypeOpaque, + } + + err := i.Client.CreateOrUpdate(context.TODO(), secret, k8sclient.CreateOrUpdateOption{ + Owner: instance, + Scheme: i.Scheme, + }) + if err != nil { + return errors.Wrap(err, "failed to create/update secret") + } + + return nil +} + +func (i *Initialize) GetCryptoSecret(instance *current.IBPCA, name string) (*corev1.Secret, error) { + log.Info(fmt.Sprintf("Getting secret '%s'", name)) + + nn := types.NamespacedName{ + 
Name: name, + Namespace: instance.GetNamespace(), + } + + secret := &corev1.Secret{} + err := i.Client.Get(context.TODO(), nn, secret) + if err != nil { + return nil, errors.Wrap(err, "failed to get secret") + } + + return secret, nil +} + +func (i *Initialize) SyncDBConfig(orig *current.IBPCA) (*current.IBPCA, error) { + instance := orig.DeepCopy() + if instance.Spec.ConfigOverride != nil { + if instance.Spec.ConfigOverride.CA != nil { + eca := &cav1.ServerConfig{} + err := json.Unmarshal(instance.Spec.ConfigOverride.CA.Raw, eca) + if err != nil { + return nil, err + } + + if instance.Spec.ConfigOverride.TLSCA == nil { + tca := &cav1.ServerConfig{} + tca.CAConfig.DB = eca.CAConfig.DB + + tbytes, err := json.Marshal(tca) + if err != nil { + return nil, err + } + + instance.Spec.ConfigOverride.TLSCA = &runtime.RawExtension{Raw: tbytes} + } else { + tca := &cav1.ServerConfig{} + err := json.Unmarshal(instance.Spec.ConfigOverride.TLSCA.Raw, tca) + if err != nil { + return nil, err + } + + tca.CAConfig.DB = eca.CAConfig.DB + tbytes, err := json.Marshal(tca) + if err != nil { + return nil, err + } + + instance.Spec.ConfigOverride.TLSCA = &runtime.RawExtension{Raw: tbytes} + } + } + } + return instance, nil +} + +func (i *Initialize) MergeCryptoMaterial(current map[string][]byte, updated map[string][]byte) map[string][]byte { + for ukey, umaterial := range updated { + if len(umaterial) != 0 { + current[ukey] = umaterial + } + } + + return current +} + +func (i *Initialize) SecretExists(instance *current.IBPCA, name string) bool { + n := types.NamespacedName{ + Name: name, + Namespace: instance.GetNamespace(), + } + + s := &corev1.Secret{} + err := i.Client.Get(context.TODO(), n, s) + if err != nil { + return false + } + + return true +} + +func ConfigToBytes(c *cav1.ServerConfig) ([]byte, error) { + bytes, err := yaml.Marshal(c) + if err != nil { + return nil, err + } + + return bytes, nil +} diff --git a/pkg/offering/base/ca/initialize_test.go b/pkg/offering/base/ca/initialize_test.go new file mode 100644 index 00000000..6ded68d6 --- /dev/null +++ b/pkg/offering/base/ca/initialize_test.go @@ -0,0 +1,205 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package baseca_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/pkg/errors" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + clientmocks "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + cav1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/ca/v1" + initializer "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca" + baseca "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/ca" + basecamocks "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/ca/mocks" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +var _ = Describe("Initialize CA", func() { + var ( + instance *current.IBPCA + cainit *baseca.Initialize + mockinitializer *basecamocks.Initializer + mockClient *clientmocks.Client + update *basecamocks.Update + ) + + BeforeEach(func() { + jm, err := util.ConvertToJsonMessage(&cav1.ServerConfig{}) + Expect(err).NotTo(HaveOccurred()) + + instance = &current.IBPCA{ + Spec: current.IBPCASpec{ + ConfigOverride: &current.ConfigOverride{ + CA: &runtime.RawExtension{Raw: *jm}, + TLSCA: &runtime.RawExtension{Raw: *jm}, + }, + }, + } + + config := &initializer.Config{ + CADefaultConfigPath: "../../../../defaultconfig/ca/ca.yaml", + TLSCADefaultConfigPath: "../../../../defaultconfig/ca/tlsca.yaml", + } + + update = &basecamocks.Update{} + mockClient = &clientmocks.Client{} + scheme := &runtime.Scheme{} + labels := func(v1.Object) map[string]string { + return nil + } + + mockinitializer = &basecamocks.Initializer{} + + cainit = &baseca.Initialize{ + Config: config, + Scheme: scheme, + Labels: labels, + Initializer: mockinitializer, + Client: mockClient, + } + }) + + Context("handle ca's config", func() { + Context("enrollment ca", func() { + It("calls update enrollment ca config when update detected", func() { + update.CAOverridesUpdatedReturns(true) + _, err := cainit.HandleEnrollmentCAInit(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(mockinitializer.UpdateCallCount()).To(Equal(1)) + }) + + It("calls create enrollment ca config when crypto secret does not exist", func() { + mockClient.GetReturns(errors.New("secret not found")) + _, err := cainit.HandleEnrollmentCAInit(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(mockinitializer.CreateCallCount()).To(Equal(1)) + }) + }) + + Context("tls ca", func() { + It("calls update tls ca config when update detected", func() { + update.TLSCAOverridesUpdatedReturns(true) + _, err := cainit.HandleTLSCAInit(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(mockinitializer.UpdateCallCount()).To(Equal(1)) + }) + + It("calls create tls ca config when crypto secret does not exist", func() { + mockClient.GetReturns(errors.New("secret not found")) + _, err := cainit.HandleTLSCAInit(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(mockinitializer.CreateCallCount()).To(Equal(1)) + }) + }) + }) + + Context("create enrollment ca's config", func() { + It("returns an error if create fails", func() { + msg := "failed to create" + mockinitializer.CreateReturns(nil, errors.New(msg)) + _, err := cainit.CreateEnrollmentCAConfig(instance) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(msg)) + }) + + It("creates config", func() { + _, err := cainit.CreateEnrollmentCAConfig(instance) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("update enrollment ca's config", func() { + It("returns an error if update fails", func() { + msg := "failed to update" + mockinitializer.UpdateReturns(nil, errors.New(msg)) + _, err := 
cainit.UpdateEnrollmentCAConfig(instance) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(msg)) + }) + + It("updates config", func() { + _, err := cainit.UpdateEnrollmentCAConfig(instance) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("create config resources", func() { + var resp *initializer.Response + + BeforeEach(func() { + resp = &initializer.Response{ + CryptoMap: map[string][]byte{"cert.pem": []byte("cert.pem")}, + Config: &cav1.ServerConfig{}, + } + }) + + It("returns an error if secret creation fails", func() { + msg := "failed to create secret" + mockClient.CreateOrUpdateReturns(errors.New(msg)) + err := cainit.CreateConfigResources("ibpca1", instance, resp) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to create/update secret: " + msg)) + }) + + It("returns an error if config map creation fails", func() { + msg := "failed to create cm" + mockClient.CreateOrUpdateReturnsOnCall(1, errors.New(msg)) + err := cainit.CreateConfigResources("ibpca1", instance, resp) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to create/update config map: " + msg)) + }) + + It("creates secret and configmap", func() { + err := cainit.CreateConfigResources("ibpca1", instance, resp) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("merge crypto", func() { + var ( + oldCrypto = map[string][]byte{} + newCrypto = map[string][]byte{} + ) + + BeforeEach(func() { + oldCrypto = map[string][]byte{ + "key1.pem": []byte("key1"), + "key2.pem": []byte("key2"), + "cert.pem": []byte("cert"), + } + + newCrypto = map[string][]byte{ + "key1.pem": []byte("newkey1"), + "cert.pem": []byte("newcert"), + } + }) + + It("only updates keys that have new values", func() { + merged := cainit.MergeCryptoMaterial(oldCrypto, newCrypto) + Expect(merged["key1.pem"]).To(Equal([]byte("newkey1"))) + Expect(merged["key2.pem"]).To(Equal([]byte("key2"))) + Expect(merged["cert.pem"]).To(Equal([]byte("newcert"))) + }) + }) +}) diff --git a/pkg/offering/base/ca/mocks/certificate_manager.go b/pkg/offering/base/ca/mocks/certificate_manager.go new file mode 100644 index 00000000..ed6c5c3a --- /dev/null +++ b/pkg/offering/base/ca/mocks/certificate_manager.go @@ -0,0 +1,380 @@ +// Code generated by counterfeiter. DO NOT EDIT.
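+// NOTE: this fake implements the baseca.CertificateManager interface asserted
+// at the bottom of this file. The sketch below is an assumed illustration of
+// how such a fake is wired into a Ginkgo test (variable names are not taken
+// from the operator code):
+//
+//   certMgr := &mocks.CertificateManager{}
+//   certMgr.ExpiresReturns(true, time.Now(), nil) // stub: report the certificate as expired
+//   // exercise code under test that accepts a baseca.CertificateManager ...
+//   Expect(certMgr.ExpiresCallCount()).To(Equal(1))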
+package mocks + +import ( + "sync" + "time" + + baseca "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/ca" + v1a "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type CertificateManager struct { + ExpiresStub func([]byte, int64) (bool, time.Time, error) + expiresMutex sync.RWMutex + expiresArgsForCall []struct { + arg1 []byte + arg2 int64 + } + expiresReturns struct { + result1 bool + result2 time.Time + result3 error + } + expiresReturnsOnCall map[int]struct { + result1 bool + result2 time.Time + result3 error + } + GetDurationToNextRenewalForCertStub func(string, []byte, v1.Object, int64) (time.Duration, error) + getDurationToNextRenewalForCertMutex sync.RWMutex + getDurationToNextRenewalForCertArgsForCall []struct { + arg1 string + arg2 []byte + arg3 v1.Object + arg4 int64 + } + getDurationToNextRenewalForCertReturns struct { + result1 time.Duration + result2 error + } + getDurationToNextRenewalForCertReturnsOnCall map[int]struct { + result1 time.Duration + result2 error + } + GetSecretStub func(string, string) (*v1a.Secret, error) + getSecretMutex sync.RWMutex + getSecretArgsForCall []struct { + arg1 string + arg2 string + } + getSecretReturns struct { + result1 *v1a.Secret + result2 error + } + getSecretReturnsOnCall map[int]struct { + result1 *v1a.Secret + result2 error + } + UpdateSecretStub func(v1.Object, string, map[string][]byte) error + updateSecretMutex sync.RWMutex + updateSecretArgsForCall []struct { + arg1 v1.Object + arg2 string + arg3 map[string][]byte + } + updateSecretReturns struct { + result1 error + } + updateSecretReturnsOnCall map[int]struct { + result1 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *CertificateManager) Expires(arg1 []byte, arg2 int64) (bool, time.Time, error) { + var arg1Copy []byte + if arg1 != nil { + arg1Copy = make([]byte, len(arg1)) + copy(arg1Copy, arg1) + } + fake.expiresMutex.Lock() + ret, specificReturn := fake.expiresReturnsOnCall[len(fake.expiresArgsForCall)] + fake.expiresArgsForCall = append(fake.expiresArgsForCall, struct { + arg1 []byte + arg2 int64 + }{arg1Copy, arg2}) + stub := fake.ExpiresStub + fakeReturns := fake.expiresReturns + fake.recordInvocation("Expires", []interface{}{arg1Copy, arg2}) + fake.expiresMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1, ret.result2, ret.result3 + } + return fakeReturns.result1, fakeReturns.result2, fakeReturns.result3 +} + +func (fake *CertificateManager) ExpiresCallCount() int { + fake.expiresMutex.RLock() + defer fake.expiresMutex.RUnlock() + return len(fake.expiresArgsForCall) +} + +func (fake *CertificateManager) ExpiresCalls(stub func([]byte, int64) (bool, time.Time, error)) { + fake.expiresMutex.Lock() + defer fake.expiresMutex.Unlock() + fake.ExpiresStub = stub +} + +func (fake *CertificateManager) ExpiresArgsForCall(i int) ([]byte, int64) { + fake.expiresMutex.RLock() + defer fake.expiresMutex.RUnlock() + argsForCall := fake.expiresArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *CertificateManager) ExpiresReturns(result1 bool, result2 time.Time, result3 error) { + fake.expiresMutex.Lock() + defer fake.expiresMutex.Unlock() + fake.ExpiresStub = nil + fake.expiresReturns = struct { + result1 bool + result2 time.Time + result3 error + }{result1, result2, result3} +} + +func (fake *CertificateManager) ExpiresReturnsOnCall(i int, result1 bool, result2 time.Time, result3 error) { + fake.expiresMutex.Lock() + defer 
fake.expiresMutex.Unlock() + fake.ExpiresStub = nil + if fake.expiresReturnsOnCall == nil { + fake.expiresReturnsOnCall = make(map[int]struct { + result1 bool + result2 time.Time + result3 error + }) + } + fake.expiresReturnsOnCall[i] = struct { + result1 bool + result2 time.Time + result3 error + }{result1, result2, result3} +} + +func (fake *CertificateManager) GetDurationToNextRenewalForCert(arg1 string, arg2 []byte, arg3 v1.Object, arg4 int64) (time.Duration, error) { + var arg2Copy []byte + if arg2 != nil { + arg2Copy = make([]byte, len(arg2)) + copy(arg2Copy, arg2) + } + fake.getDurationToNextRenewalForCertMutex.Lock() + ret, specificReturn := fake.getDurationToNextRenewalForCertReturnsOnCall[len(fake.getDurationToNextRenewalForCertArgsForCall)] + fake.getDurationToNextRenewalForCertArgsForCall = append(fake.getDurationToNextRenewalForCertArgsForCall, struct { + arg1 string + arg2 []byte + arg3 v1.Object + arg4 int64 + }{arg1, arg2Copy, arg3, arg4}) + stub := fake.GetDurationToNextRenewalForCertStub + fakeReturns := fake.getDurationToNextRenewalForCertReturns + fake.recordInvocation("GetDurationToNextRenewalForCert", []interface{}{arg1, arg2Copy, arg3, arg4}) + fake.getDurationToNextRenewalForCertMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3, arg4) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *CertificateManager) GetDurationToNextRenewalForCertCallCount() int { + fake.getDurationToNextRenewalForCertMutex.RLock() + defer fake.getDurationToNextRenewalForCertMutex.RUnlock() + return len(fake.getDurationToNextRenewalForCertArgsForCall) +} + +func (fake *CertificateManager) GetDurationToNextRenewalForCertCalls(stub func(string, []byte, v1.Object, int64) (time.Duration, error)) { + fake.getDurationToNextRenewalForCertMutex.Lock() + defer fake.getDurationToNextRenewalForCertMutex.Unlock() + fake.GetDurationToNextRenewalForCertStub = stub +} + +func (fake *CertificateManager) GetDurationToNextRenewalForCertArgsForCall(i int) (string, []byte, v1.Object, int64) { + fake.getDurationToNextRenewalForCertMutex.RLock() + defer fake.getDurationToNextRenewalForCertMutex.RUnlock() + argsForCall := fake.getDurationToNextRenewalForCertArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4 +} + +func (fake *CertificateManager) GetDurationToNextRenewalForCertReturns(result1 time.Duration, result2 error) { + fake.getDurationToNextRenewalForCertMutex.Lock() + defer fake.getDurationToNextRenewalForCertMutex.Unlock() + fake.GetDurationToNextRenewalForCertStub = nil + fake.getDurationToNextRenewalForCertReturns = struct { + result1 time.Duration + result2 error + }{result1, result2} +} + +func (fake *CertificateManager) GetDurationToNextRenewalForCertReturnsOnCall(i int, result1 time.Duration, result2 error) { + fake.getDurationToNextRenewalForCertMutex.Lock() + defer fake.getDurationToNextRenewalForCertMutex.Unlock() + fake.GetDurationToNextRenewalForCertStub = nil + if fake.getDurationToNextRenewalForCertReturnsOnCall == nil { + fake.getDurationToNextRenewalForCertReturnsOnCall = make(map[int]struct { + result1 time.Duration + result2 error + }) + } + fake.getDurationToNextRenewalForCertReturnsOnCall[i] = struct { + result1 time.Duration + result2 error + }{result1, result2} +} + +func (fake *CertificateManager) GetSecret(arg1 string, arg2 string) (*v1a.Secret, error) { + fake.getSecretMutex.Lock() + ret, specificReturn := 
fake.getSecretReturnsOnCall[len(fake.getSecretArgsForCall)] + fake.getSecretArgsForCall = append(fake.getSecretArgsForCall, struct { + arg1 string + arg2 string + }{arg1, arg2}) + stub := fake.GetSecretStub + fakeReturns := fake.getSecretReturns + fake.recordInvocation("GetSecret", []interface{}{arg1, arg2}) + fake.getSecretMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *CertificateManager) GetSecretCallCount() int { + fake.getSecretMutex.RLock() + defer fake.getSecretMutex.RUnlock() + return len(fake.getSecretArgsForCall) +} + +func (fake *CertificateManager) GetSecretCalls(stub func(string, string) (*v1a.Secret, error)) { + fake.getSecretMutex.Lock() + defer fake.getSecretMutex.Unlock() + fake.GetSecretStub = stub +} + +func (fake *CertificateManager) GetSecretArgsForCall(i int) (string, string) { + fake.getSecretMutex.RLock() + defer fake.getSecretMutex.RUnlock() + argsForCall := fake.getSecretArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *CertificateManager) GetSecretReturns(result1 *v1a.Secret, result2 error) { + fake.getSecretMutex.Lock() + defer fake.getSecretMutex.Unlock() + fake.GetSecretStub = nil + fake.getSecretReturns = struct { + result1 *v1a.Secret + result2 error + }{result1, result2} +} + +func (fake *CertificateManager) GetSecretReturnsOnCall(i int, result1 *v1a.Secret, result2 error) { + fake.getSecretMutex.Lock() + defer fake.getSecretMutex.Unlock() + fake.GetSecretStub = nil + if fake.getSecretReturnsOnCall == nil { + fake.getSecretReturnsOnCall = make(map[int]struct { + result1 *v1a.Secret + result2 error + }) + } + fake.getSecretReturnsOnCall[i] = struct { + result1 *v1a.Secret + result2 error + }{result1, result2} +} + +func (fake *CertificateManager) UpdateSecret(arg1 v1.Object, arg2 string, arg3 map[string][]byte) error { + fake.updateSecretMutex.Lock() + ret, specificReturn := fake.updateSecretReturnsOnCall[len(fake.updateSecretArgsForCall)] + fake.updateSecretArgsForCall = append(fake.updateSecretArgsForCall, struct { + arg1 v1.Object + arg2 string + arg3 map[string][]byte + }{arg1, arg2, arg3}) + stub := fake.UpdateSecretStub + fakeReturns := fake.updateSecretReturns + fake.recordInvocation("UpdateSecret", []interface{}{arg1, arg2, arg3}) + fake.updateSecretMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *CertificateManager) UpdateSecretCallCount() int { + fake.updateSecretMutex.RLock() + defer fake.updateSecretMutex.RUnlock() + return len(fake.updateSecretArgsForCall) +} + +func (fake *CertificateManager) UpdateSecretCalls(stub func(v1.Object, string, map[string][]byte) error) { + fake.updateSecretMutex.Lock() + defer fake.updateSecretMutex.Unlock() + fake.UpdateSecretStub = stub +} + +func (fake *CertificateManager) UpdateSecretArgsForCall(i int) (v1.Object, string, map[string][]byte) { + fake.updateSecretMutex.RLock() + defer fake.updateSecretMutex.RUnlock() + argsForCall := fake.updateSecretArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *CertificateManager) UpdateSecretReturns(result1 error) { + fake.updateSecretMutex.Lock() + defer fake.updateSecretMutex.Unlock() + fake.UpdateSecretStub = nil + fake.updateSecretReturns = struct { + result1 error + }{result1} +} + +func (fake *CertificateManager) UpdateSecretReturnsOnCall(i 
int, result1 error) { + fake.updateSecretMutex.Lock() + defer fake.updateSecretMutex.Unlock() + fake.UpdateSecretStub = nil + if fake.updateSecretReturnsOnCall == nil { + fake.updateSecretReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.updateSecretReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *CertificateManager) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.expiresMutex.RLock() + defer fake.expiresMutex.RUnlock() + fake.getDurationToNextRenewalForCertMutex.RLock() + defer fake.getDurationToNextRenewalForCertMutex.RUnlock() + fake.getSecretMutex.RLock() + defer fake.getSecretMutex.RUnlock() + fake.updateSecretMutex.RLock() + defer fake.updateSecretMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *CertificateManager) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ baseca.CertificateManager = new(CertificateManager) diff --git a/pkg/offering/base/ca/mocks/initialize.go b/pkg/offering/base/ca/mocks/initialize.go new file mode 100644 index 00000000..8f0f35b7 --- /dev/null +++ b/pkg/offering/base/ca/mocks/initialize.go @@ -0,0 +1,520 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package mocks + +import ( + "sync" + + "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + initializer "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca" + baseca "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/ca" + v1 "k8s.io/api/core/v1" +) + +type InitializeIBPCA struct { + CreateOrUpdateConfigMapStub func(*v1beta1.IBPCA, map[string][]byte, string) error + createOrUpdateConfigMapMutex sync.RWMutex + createOrUpdateConfigMapArgsForCall []struct { + arg1 *v1beta1.IBPCA + arg2 map[string][]byte + arg3 string + } + createOrUpdateConfigMapReturns struct { + result1 error + } + createOrUpdateConfigMapReturnsOnCall map[int]struct { + result1 error + } + HandleConfigResourcesStub func(string, *v1beta1.IBPCA, *initializer.Response, baseca.Update) error + handleConfigResourcesMutex sync.RWMutex + handleConfigResourcesArgsForCall []struct { + arg1 string + arg2 *v1beta1.IBPCA + arg3 *initializer.Response + arg4 baseca.Update + } + handleConfigResourcesReturns struct { + result1 error + } + handleConfigResourcesReturnsOnCall map[int]struct { + result1 error + } + HandleEnrollmentCAInitStub func(*v1beta1.IBPCA, baseca.Update) (*initializer.Response, error) + handleEnrollmentCAInitMutex sync.RWMutex + handleEnrollmentCAInitArgsForCall []struct { + arg1 *v1beta1.IBPCA + arg2 baseca.Update + } + handleEnrollmentCAInitReturns struct { + result1 *initializer.Response + result2 error + } + handleEnrollmentCAInitReturnsOnCall map[int]struct { + result1 *initializer.Response + result2 error + } + HandleTLSCAInitStub func(*v1beta1.IBPCA, baseca.Update) (*initializer.Response, error) + handleTLSCAInitMutex sync.RWMutex + handleTLSCAInitArgsForCall []struct { + arg1 *v1beta1.IBPCA + arg2 baseca.Update + } + handleTLSCAInitReturns struct { + result1 *initializer.Response + result2 error + } + handleTLSCAInitReturnsOnCall 
map[int]struct { + result1 *initializer.Response + result2 error + } + ReadConfigMapStub func(*v1beta1.IBPCA, string) (*v1.ConfigMap, error) + readConfigMapMutex sync.RWMutex + readConfigMapArgsForCall []struct { + arg1 *v1beta1.IBPCA + arg2 string + } + readConfigMapReturns struct { + result1 *v1.ConfigMap + result2 error + } + readConfigMapReturnsOnCall map[int]struct { + result1 *v1.ConfigMap + result2 error + } + SyncDBConfigStub func(*v1beta1.IBPCA) (*v1beta1.IBPCA, error) + syncDBConfigMutex sync.RWMutex + syncDBConfigArgsForCall []struct { + arg1 *v1beta1.IBPCA + } + syncDBConfigReturns struct { + result1 *v1beta1.IBPCA + result2 error + } + syncDBConfigReturnsOnCall map[int]struct { + result1 *v1beta1.IBPCA + result2 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *InitializeIBPCA) CreateOrUpdateConfigMap(arg1 *v1beta1.IBPCA, arg2 map[string][]byte, arg3 string) error { + fake.createOrUpdateConfigMapMutex.Lock() + ret, specificReturn := fake.createOrUpdateConfigMapReturnsOnCall[len(fake.createOrUpdateConfigMapArgsForCall)] + fake.createOrUpdateConfigMapArgsForCall = append(fake.createOrUpdateConfigMapArgsForCall, struct { + arg1 *v1beta1.IBPCA + arg2 map[string][]byte + arg3 string + }{arg1, arg2, arg3}) + stub := fake.CreateOrUpdateConfigMapStub + fakeReturns := fake.createOrUpdateConfigMapReturns + fake.recordInvocation("CreateOrUpdateConfigMap", []interface{}{arg1, arg2, arg3}) + fake.createOrUpdateConfigMapMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *InitializeIBPCA) CreateOrUpdateConfigMapCallCount() int { + fake.createOrUpdateConfigMapMutex.RLock() + defer fake.createOrUpdateConfigMapMutex.RUnlock() + return len(fake.createOrUpdateConfigMapArgsForCall) +} + +func (fake *InitializeIBPCA) CreateOrUpdateConfigMapCalls(stub func(*v1beta1.IBPCA, map[string][]byte, string) error) { + fake.createOrUpdateConfigMapMutex.Lock() + defer fake.createOrUpdateConfigMapMutex.Unlock() + fake.CreateOrUpdateConfigMapStub = stub +} + +func (fake *InitializeIBPCA) CreateOrUpdateConfigMapArgsForCall(i int) (*v1beta1.IBPCA, map[string][]byte, string) { + fake.createOrUpdateConfigMapMutex.RLock() + defer fake.createOrUpdateConfigMapMutex.RUnlock() + argsForCall := fake.createOrUpdateConfigMapArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *InitializeIBPCA) CreateOrUpdateConfigMapReturns(result1 error) { + fake.createOrUpdateConfigMapMutex.Lock() + defer fake.createOrUpdateConfigMapMutex.Unlock() + fake.CreateOrUpdateConfigMapStub = nil + fake.createOrUpdateConfigMapReturns = struct { + result1 error + }{result1} +} + +func (fake *InitializeIBPCA) CreateOrUpdateConfigMapReturnsOnCall(i int, result1 error) { + fake.createOrUpdateConfigMapMutex.Lock() + defer fake.createOrUpdateConfigMapMutex.Unlock() + fake.CreateOrUpdateConfigMapStub = nil + if fake.createOrUpdateConfigMapReturnsOnCall == nil { + fake.createOrUpdateConfigMapReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.createOrUpdateConfigMapReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *InitializeIBPCA) HandleConfigResources(arg1 string, arg2 *v1beta1.IBPCA, arg3 *initializer.Response, arg4 baseca.Update) error { + fake.handleConfigResourcesMutex.Lock() + ret, specificReturn := fake.handleConfigResourcesReturnsOnCall[len(fake.handleConfigResourcesArgsForCall)] + 
fake.handleConfigResourcesArgsForCall = append(fake.handleConfigResourcesArgsForCall, struct { + arg1 string + arg2 *v1beta1.IBPCA + arg3 *initializer.Response + arg4 baseca.Update + }{arg1, arg2, arg3, arg4}) + stub := fake.HandleConfigResourcesStub + fakeReturns := fake.handleConfigResourcesReturns + fake.recordInvocation("HandleConfigResources", []interface{}{arg1, arg2, arg3, arg4}) + fake.handleConfigResourcesMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3, arg4) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *InitializeIBPCA) HandleConfigResourcesCallCount() int { + fake.handleConfigResourcesMutex.RLock() + defer fake.handleConfigResourcesMutex.RUnlock() + return len(fake.handleConfigResourcesArgsForCall) +} + +func (fake *InitializeIBPCA) HandleConfigResourcesCalls(stub func(string, *v1beta1.IBPCA, *initializer.Response, baseca.Update) error) { + fake.handleConfigResourcesMutex.Lock() + defer fake.handleConfigResourcesMutex.Unlock() + fake.HandleConfigResourcesStub = stub +} + +func (fake *InitializeIBPCA) HandleConfigResourcesArgsForCall(i int) (string, *v1beta1.IBPCA, *initializer.Response, baseca.Update) { + fake.handleConfigResourcesMutex.RLock() + defer fake.handleConfigResourcesMutex.RUnlock() + argsForCall := fake.handleConfigResourcesArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4 +} + +func (fake *InitializeIBPCA) HandleConfigResourcesReturns(result1 error) { + fake.handleConfigResourcesMutex.Lock() + defer fake.handleConfigResourcesMutex.Unlock() + fake.HandleConfigResourcesStub = nil + fake.handleConfigResourcesReturns = struct { + result1 error + }{result1} +} + +func (fake *InitializeIBPCA) HandleConfigResourcesReturnsOnCall(i int, result1 error) { + fake.handleConfigResourcesMutex.Lock() + defer fake.handleConfigResourcesMutex.Unlock() + fake.HandleConfigResourcesStub = nil + if fake.handleConfigResourcesReturnsOnCall == nil { + fake.handleConfigResourcesReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.handleConfigResourcesReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *InitializeIBPCA) HandleEnrollmentCAInit(arg1 *v1beta1.IBPCA, arg2 baseca.Update) (*initializer.Response, error) { + fake.handleEnrollmentCAInitMutex.Lock() + ret, specificReturn := fake.handleEnrollmentCAInitReturnsOnCall[len(fake.handleEnrollmentCAInitArgsForCall)] + fake.handleEnrollmentCAInitArgsForCall = append(fake.handleEnrollmentCAInitArgsForCall, struct { + arg1 *v1beta1.IBPCA + arg2 baseca.Update + }{arg1, arg2}) + stub := fake.HandleEnrollmentCAInitStub + fakeReturns := fake.handleEnrollmentCAInitReturns + fake.recordInvocation("HandleEnrollmentCAInit", []interface{}{arg1, arg2}) + fake.handleEnrollmentCAInitMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *InitializeIBPCA) HandleEnrollmentCAInitCallCount() int { + fake.handleEnrollmentCAInitMutex.RLock() + defer fake.handleEnrollmentCAInitMutex.RUnlock() + return len(fake.handleEnrollmentCAInitArgsForCall) +} + +func (fake *InitializeIBPCA) HandleEnrollmentCAInitCalls(stub func(*v1beta1.IBPCA, baseca.Update) (*initializer.Response, error)) { + fake.handleEnrollmentCAInitMutex.Lock() + defer fake.handleEnrollmentCAInitMutex.Unlock() + fake.HandleEnrollmentCAInitStub = stub +} + +func (fake *InitializeIBPCA) HandleEnrollmentCAInitArgsForCall(i 
int) (*v1beta1.IBPCA, baseca.Update) { + fake.handleEnrollmentCAInitMutex.RLock() + defer fake.handleEnrollmentCAInitMutex.RUnlock() + argsForCall := fake.handleEnrollmentCAInitArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *InitializeIBPCA) HandleEnrollmentCAInitReturns(result1 *initializer.Response, result2 error) { + fake.handleEnrollmentCAInitMutex.Lock() + defer fake.handleEnrollmentCAInitMutex.Unlock() + fake.HandleEnrollmentCAInitStub = nil + fake.handleEnrollmentCAInitReturns = struct { + result1 *initializer.Response + result2 error + }{result1, result2} +} + +func (fake *InitializeIBPCA) HandleEnrollmentCAInitReturnsOnCall(i int, result1 *initializer.Response, result2 error) { + fake.handleEnrollmentCAInitMutex.Lock() + defer fake.handleEnrollmentCAInitMutex.Unlock() + fake.HandleEnrollmentCAInitStub = nil + if fake.handleEnrollmentCAInitReturnsOnCall == nil { + fake.handleEnrollmentCAInitReturnsOnCall = make(map[int]struct { + result1 *initializer.Response + result2 error + }) + } + fake.handleEnrollmentCAInitReturnsOnCall[i] = struct { + result1 *initializer.Response + result2 error + }{result1, result2} +} + +func (fake *InitializeIBPCA) HandleTLSCAInit(arg1 *v1beta1.IBPCA, arg2 baseca.Update) (*initializer.Response, error) { + fake.handleTLSCAInitMutex.Lock() + ret, specificReturn := fake.handleTLSCAInitReturnsOnCall[len(fake.handleTLSCAInitArgsForCall)] + fake.handleTLSCAInitArgsForCall = append(fake.handleTLSCAInitArgsForCall, struct { + arg1 *v1beta1.IBPCA + arg2 baseca.Update + }{arg1, arg2}) + stub := fake.HandleTLSCAInitStub + fakeReturns := fake.handleTLSCAInitReturns + fake.recordInvocation("HandleTLSCAInit", []interface{}{arg1, arg2}) + fake.handleTLSCAInitMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *InitializeIBPCA) HandleTLSCAInitCallCount() int { + fake.handleTLSCAInitMutex.RLock() + defer fake.handleTLSCAInitMutex.RUnlock() + return len(fake.handleTLSCAInitArgsForCall) +} + +func (fake *InitializeIBPCA) HandleTLSCAInitCalls(stub func(*v1beta1.IBPCA, baseca.Update) (*initializer.Response, error)) { + fake.handleTLSCAInitMutex.Lock() + defer fake.handleTLSCAInitMutex.Unlock() + fake.HandleTLSCAInitStub = stub +} + +func (fake *InitializeIBPCA) HandleTLSCAInitArgsForCall(i int) (*v1beta1.IBPCA, baseca.Update) { + fake.handleTLSCAInitMutex.RLock() + defer fake.handleTLSCAInitMutex.RUnlock() + argsForCall := fake.handleTLSCAInitArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *InitializeIBPCA) HandleTLSCAInitReturns(result1 *initializer.Response, result2 error) { + fake.handleTLSCAInitMutex.Lock() + defer fake.handleTLSCAInitMutex.Unlock() + fake.HandleTLSCAInitStub = nil + fake.handleTLSCAInitReturns = struct { + result1 *initializer.Response + result2 error + }{result1, result2} +} + +func (fake *InitializeIBPCA) HandleTLSCAInitReturnsOnCall(i int, result1 *initializer.Response, result2 error) { + fake.handleTLSCAInitMutex.Lock() + defer fake.handleTLSCAInitMutex.Unlock() + fake.HandleTLSCAInitStub = nil + if fake.handleTLSCAInitReturnsOnCall == nil { + fake.handleTLSCAInitReturnsOnCall = make(map[int]struct { + result1 *initializer.Response + result2 error + }) + } + fake.handleTLSCAInitReturnsOnCall[i] = struct { + result1 *initializer.Response + result2 error + }{result1, result2} +} + +func (fake *InitializeIBPCA) ReadConfigMap(arg1 *v1beta1.IBPCA, arg2 string) 
(*v1.ConfigMap, error) { + fake.readConfigMapMutex.Lock() + ret, specificReturn := fake.readConfigMapReturnsOnCall[len(fake.readConfigMapArgsForCall)] + fake.readConfigMapArgsForCall = append(fake.readConfigMapArgsForCall, struct { + arg1 *v1beta1.IBPCA + arg2 string + }{arg1, arg2}) + stub := fake.ReadConfigMapStub + fakeReturns := fake.readConfigMapReturns + fake.recordInvocation("ReadConfigMap", []interface{}{arg1, arg2}) + fake.readConfigMapMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *InitializeIBPCA) ReadConfigMapCallCount() int { + fake.readConfigMapMutex.RLock() + defer fake.readConfigMapMutex.RUnlock() + return len(fake.readConfigMapArgsForCall) +} + +func (fake *InitializeIBPCA) ReadConfigMapCalls(stub func(*v1beta1.IBPCA, string) (*v1.ConfigMap, error)) { + fake.readConfigMapMutex.Lock() + defer fake.readConfigMapMutex.Unlock() + fake.ReadConfigMapStub = stub +} + +func (fake *InitializeIBPCA) ReadConfigMapArgsForCall(i int) (*v1beta1.IBPCA, string) { + fake.readConfigMapMutex.RLock() + defer fake.readConfigMapMutex.RUnlock() + argsForCall := fake.readConfigMapArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *InitializeIBPCA) ReadConfigMapReturns(result1 *v1.ConfigMap, result2 error) { + fake.readConfigMapMutex.Lock() + defer fake.readConfigMapMutex.Unlock() + fake.ReadConfigMapStub = nil + fake.readConfigMapReturns = struct { + result1 *v1.ConfigMap + result2 error + }{result1, result2} +} + +func (fake *InitializeIBPCA) ReadConfigMapReturnsOnCall(i int, result1 *v1.ConfigMap, result2 error) { + fake.readConfigMapMutex.Lock() + defer fake.readConfigMapMutex.Unlock() + fake.ReadConfigMapStub = nil + if fake.readConfigMapReturnsOnCall == nil { + fake.readConfigMapReturnsOnCall = make(map[int]struct { + result1 *v1.ConfigMap + result2 error + }) + } + fake.readConfigMapReturnsOnCall[i] = struct { + result1 *v1.ConfigMap + result2 error + }{result1, result2} +} + +func (fake *InitializeIBPCA) SyncDBConfig(arg1 *v1beta1.IBPCA) (*v1beta1.IBPCA, error) { + fake.syncDBConfigMutex.Lock() + ret, specificReturn := fake.syncDBConfigReturnsOnCall[len(fake.syncDBConfigArgsForCall)] + fake.syncDBConfigArgsForCall = append(fake.syncDBConfigArgsForCall, struct { + arg1 *v1beta1.IBPCA + }{arg1}) + stub := fake.SyncDBConfigStub + fakeReturns := fake.syncDBConfigReturns + fake.recordInvocation("SyncDBConfig", []interface{}{arg1}) + fake.syncDBConfigMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *InitializeIBPCA) SyncDBConfigCallCount() int { + fake.syncDBConfigMutex.RLock() + defer fake.syncDBConfigMutex.RUnlock() + return len(fake.syncDBConfigArgsForCall) +} + +func (fake *InitializeIBPCA) SyncDBConfigCalls(stub func(*v1beta1.IBPCA) (*v1beta1.IBPCA, error)) { + fake.syncDBConfigMutex.Lock() + defer fake.syncDBConfigMutex.Unlock() + fake.SyncDBConfigStub = stub +} + +func (fake *InitializeIBPCA) SyncDBConfigArgsForCall(i int) *v1beta1.IBPCA { + fake.syncDBConfigMutex.RLock() + defer fake.syncDBConfigMutex.RUnlock() + argsForCall := fake.syncDBConfigArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *InitializeIBPCA) SyncDBConfigReturns(result1 *v1beta1.IBPCA, result2 error) { + fake.syncDBConfigMutex.Lock() + defer fake.syncDBConfigMutex.Unlock() + fake.SyncDBConfigStub = nil + 
fake.syncDBConfigReturns = struct { + result1 *v1beta1.IBPCA + result2 error + }{result1, result2} +} + +func (fake *InitializeIBPCA) SyncDBConfigReturnsOnCall(i int, result1 *v1beta1.IBPCA, result2 error) { + fake.syncDBConfigMutex.Lock() + defer fake.syncDBConfigMutex.Unlock() + fake.SyncDBConfigStub = nil + if fake.syncDBConfigReturnsOnCall == nil { + fake.syncDBConfigReturnsOnCall = make(map[int]struct { + result1 *v1beta1.IBPCA + result2 error + }) + } + fake.syncDBConfigReturnsOnCall[i] = struct { + result1 *v1beta1.IBPCA + result2 error + }{result1, result2} +} + +func (fake *InitializeIBPCA) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.createOrUpdateConfigMapMutex.RLock() + defer fake.createOrUpdateConfigMapMutex.RUnlock() + fake.handleConfigResourcesMutex.RLock() + defer fake.handleConfigResourcesMutex.RUnlock() + fake.handleEnrollmentCAInitMutex.RLock() + defer fake.handleEnrollmentCAInitMutex.RUnlock() + fake.handleTLSCAInitMutex.RLock() + defer fake.handleTLSCAInitMutex.RUnlock() + fake.readConfigMapMutex.RLock() + defer fake.readConfigMapMutex.RUnlock() + fake.syncDBConfigMutex.RLock() + defer fake.syncDBConfigMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *InitializeIBPCA) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ baseca.InitializeIBPCA = new(InitializeIBPCA) diff --git a/pkg/offering/base/ca/mocks/initializer.go b/pkg/offering/base/ca/mocks/initializer.go new file mode 100644 index 00000000..74611c2e --- /dev/null +++ b/pkg/offering/base/ca/mocks/initializer.go @@ -0,0 +1,206 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
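+// NOTE: this fake implements the baseca.Initializer interface and backs the
+// mockinitializer variable in initialize_test.go above. An assumed wiring
+// sketch that mirrors the test file (names are illustrative):
+//
+//   mockinitializer := &mocks.Initializer{}
+//   mockinitializer.CreateReturns(nil, errors.New("failed to create")) // force the create path to fail
+//   // call cainit.CreateEnrollmentCAConfig(instance) and assert on the returned error ...
+//   Expect(mockinitializer.UpdateCallCount()).To(Equal(0))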
+package mocks + +import ( + "sync" + + "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/ca/v1" + initializer "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca" + baseca "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/ca" +) + +type Initializer struct { + CreateStub func(*v1beta1.IBPCA, *v1.ServerConfig, initializer.IBPCA) (*initializer.Response, error) + createMutex sync.RWMutex + createArgsForCall []struct { + arg1 *v1beta1.IBPCA + arg2 *v1.ServerConfig + arg3 initializer.IBPCA + } + createReturns struct { + result1 *initializer.Response + result2 error + } + createReturnsOnCall map[int]struct { + result1 *initializer.Response + result2 error + } + UpdateStub func(*v1beta1.IBPCA, *v1.ServerConfig, initializer.IBPCA) (*initializer.Response, error) + updateMutex sync.RWMutex + updateArgsForCall []struct { + arg1 *v1beta1.IBPCA + arg2 *v1.ServerConfig + arg3 initializer.IBPCA + } + updateReturns struct { + result1 *initializer.Response + result2 error + } + updateReturnsOnCall map[int]struct { + result1 *initializer.Response + result2 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *Initializer) Create(arg1 *v1beta1.IBPCA, arg2 *v1.ServerConfig, arg3 initializer.IBPCA) (*initializer.Response, error) { + fake.createMutex.Lock() + ret, specificReturn := fake.createReturnsOnCall[len(fake.createArgsForCall)] + fake.createArgsForCall = append(fake.createArgsForCall, struct { + arg1 *v1beta1.IBPCA + arg2 *v1.ServerConfig + arg3 initializer.IBPCA + }{arg1, arg2, arg3}) + stub := fake.CreateStub + fakeReturns := fake.createReturns + fake.recordInvocation("Create", []interface{}{arg1, arg2, arg3}) + fake.createMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *Initializer) CreateCallCount() int { + fake.createMutex.RLock() + defer fake.createMutex.RUnlock() + return len(fake.createArgsForCall) +} + +func (fake *Initializer) CreateCalls(stub func(*v1beta1.IBPCA, *v1.ServerConfig, initializer.IBPCA) (*initializer.Response, error)) { + fake.createMutex.Lock() + defer fake.createMutex.Unlock() + fake.CreateStub = stub +} + +func (fake *Initializer) CreateArgsForCall(i int) (*v1beta1.IBPCA, *v1.ServerConfig, initializer.IBPCA) { + fake.createMutex.RLock() + defer fake.createMutex.RUnlock() + argsForCall := fake.createArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Initializer) CreateReturns(result1 *initializer.Response, result2 error) { + fake.createMutex.Lock() + defer fake.createMutex.Unlock() + fake.CreateStub = nil + fake.createReturns = struct { + result1 *initializer.Response + result2 error + }{result1, result2} +} + +func (fake *Initializer) CreateReturnsOnCall(i int, result1 *initializer.Response, result2 error) { + fake.createMutex.Lock() + defer fake.createMutex.Unlock() + fake.CreateStub = nil + if fake.createReturnsOnCall == nil { + fake.createReturnsOnCall = make(map[int]struct { + result1 *initializer.Response + result2 error + }) + } + fake.createReturnsOnCall[i] = struct { + result1 *initializer.Response + result2 error + }{result1, result2} +} + +func (fake *Initializer) Update(arg1 *v1beta1.IBPCA, arg2 *v1.ServerConfig, arg3 initializer.IBPCA) (*initializer.Response, error) { + fake.updateMutex.Lock() + ret, specificReturn := 
fake.updateReturnsOnCall[len(fake.updateArgsForCall)] + fake.updateArgsForCall = append(fake.updateArgsForCall, struct { + arg1 *v1beta1.IBPCA + arg2 *v1.ServerConfig + arg3 initializer.IBPCA + }{arg1, arg2, arg3}) + stub := fake.UpdateStub + fakeReturns := fake.updateReturns + fake.recordInvocation("Update", []interface{}{arg1, arg2, arg3}) + fake.updateMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *Initializer) UpdateCallCount() int { + fake.updateMutex.RLock() + defer fake.updateMutex.RUnlock() + return len(fake.updateArgsForCall) +} + +func (fake *Initializer) UpdateCalls(stub func(*v1beta1.IBPCA, *v1.ServerConfig, initializer.IBPCA) (*initializer.Response, error)) { + fake.updateMutex.Lock() + defer fake.updateMutex.Unlock() + fake.UpdateStub = stub +} + +func (fake *Initializer) UpdateArgsForCall(i int) (*v1beta1.IBPCA, *v1.ServerConfig, initializer.IBPCA) { + fake.updateMutex.RLock() + defer fake.updateMutex.RUnlock() + argsForCall := fake.updateArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *Initializer) UpdateReturns(result1 *initializer.Response, result2 error) { + fake.updateMutex.Lock() + defer fake.updateMutex.Unlock() + fake.UpdateStub = nil + fake.updateReturns = struct { + result1 *initializer.Response + result2 error + }{result1, result2} +} + +func (fake *Initializer) UpdateReturnsOnCall(i int, result1 *initializer.Response, result2 error) { + fake.updateMutex.Lock() + defer fake.updateMutex.Unlock() + fake.UpdateStub = nil + if fake.updateReturnsOnCall == nil { + fake.updateReturnsOnCall = make(map[int]struct { + result1 *initializer.Response + result2 error + }) + } + fake.updateReturnsOnCall[i] = struct { + result1 *initializer.Response + result2 error + }{result1, result2} +} + +func (fake *Initializer) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.createMutex.RLock() + defer fake.createMutex.RUnlock() + fake.updateMutex.RLock() + defer fake.updateMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *Initializer) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ baseca.Initializer = new(Initializer) diff --git a/pkg/offering/base/ca/mocks/restart_manager.go b/pkg/offering/base/ca/mocks/restart_manager.go new file mode 100644 index 00000000..175b6373 --- /dev/null +++ b/pkg/offering/base/ca/mocks/restart_manager.go @@ -0,0 +1,335 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
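+// NOTE: this fake implements the baseca.RestartManager interface; each method
+// (ForConfigOverride, ForRestartAction, ForTLSReenroll, TriggerIfNeeded) can be
+// stubbed and inspected per call. An assumed one-line sketch:
+//
+//   restartMgr := &mocks.RestartManager{}
+//   restartMgr.ForConfigOverrideReturns(nil) // treat restart bookkeeping as a no-op in tests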
+package mocks + +import ( + "sync" + + baseca "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/ca" + "github.com/IBM-Blockchain/fabric-operator/pkg/restart" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type RestartManager struct { + ForConfigOverrideStub func(v1.Object) error + forConfigOverrideMutex sync.RWMutex + forConfigOverrideArgsForCall []struct { + arg1 v1.Object + } + forConfigOverrideReturns struct { + result1 error + } + forConfigOverrideReturnsOnCall map[int]struct { + result1 error + } + ForRestartActionStub func(v1.Object) error + forRestartActionMutex sync.RWMutex + forRestartActionArgsForCall []struct { + arg1 v1.Object + } + forRestartActionReturns struct { + result1 error + } + forRestartActionReturnsOnCall map[int]struct { + result1 error + } + ForTLSReenrollStub func(v1.Object) error + forTLSReenrollMutex sync.RWMutex + forTLSReenrollArgsForCall []struct { + arg1 v1.Object + } + forTLSReenrollReturns struct { + result1 error + } + forTLSReenrollReturnsOnCall map[int]struct { + result1 error + } + TriggerIfNeededStub func(restart.Instance) error + triggerIfNeededMutex sync.RWMutex + triggerIfNeededArgsForCall []struct { + arg1 restart.Instance + } + triggerIfNeededReturns struct { + result1 error + } + triggerIfNeededReturnsOnCall map[int]struct { + result1 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *RestartManager) ForConfigOverride(arg1 v1.Object) error { + fake.forConfigOverrideMutex.Lock() + ret, specificReturn := fake.forConfigOverrideReturnsOnCall[len(fake.forConfigOverrideArgsForCall)] + fake.forConfigOverrideArgsForCall = append(fake.forConfigOverrideArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.ForConfigOverrideStub + fakeReturns := fake.forConfigOverrideReturns + fake.recordInvocation("ForConfigOverride", []interface{}{arg1}) + fake.forConfigOverrideMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *RestartManager) ForConfigOverrideCallCount() int { + fake.forConfigOverrideMutex.RLock() + defer fake.forConfigOverrideMutex.RUnlock() + return len(fake.forConfigOverrideArgsForCall) +} + +func (fake *RestartManager) ForConfigOverrideCalls(stub func(v1.Object) error) { + fake.forConfigOverrideMutex.Lock() + defer fake.forConfigOverrideMutex.Unlock() + fake.ForConfigOverrideStub = stub +} + +func (fake *RestartManager) ForConfigOverrideArgsForCall(i int) v1.Object { + fake.forConfigOverrideMutex.RLock() + defer fake.forConfigOverrideMutex.RUnlock() + argsForCall := fake.forConfigOverrideArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *RestartManager) ForConfigOverrideReturns(result1 error) { + fake.forConfigOverrideMutex.Lock() + defer fake.forConfigOverrideMutex.Unlock() + fake.ForConfigOverrideStub = nil + fake.forConfigOverrideReturns = struct { + result1 error + }{result1} +} + +func (fake *RestartManager) ForConfigOverrideReturnsOnCall(i int, result1 error) { + fake.forConfigOverrideMutex.Lock() + defer fake.forConfigOverrideMutex.Unlock() + fake.ForConfigOverrideStub = nil + if fake.forConfigOverrideReturnsOnCall == nil { + fake.forConfigOverrideReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.forConfigOverrideReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *RestartManager) ForRestartAction(arg1 v1.Object) error { + fake.forRestartActionMutex.Lock() + ret, specificReturn := 
fake.forRestartActionReturnsOnCall[len(fake.forRestartActionArgsForCall)] + fake.forRestartActionArgsForCall = append(fake.forRestartActionArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.ForRestartActionStub + fakeReturns := fake.forRestartActionReturns + fake.recordInvocation("ForRestartAction", []interface{}{arg1}) + fake.forRestartActionMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *RestartManager) ForRestartActionCallCount() int { + fake.forRestartActionMutex.RLock() + defer fake.forRestartActionMutex.RUnlock() + return len(fake.forRestartActionArgsForCall) +} + +func (fake *RestartManager) ForRestartActionCalls(stub func(v1.Object) error) { + fake.forRestartActionMutex.Lock() + defer fake.forRestartActionMutex.Unlock() + fake.ForRestartActionStub = stub +} + +func (fake *RestartManager) ForRestartActionArgsForCall(i int) v1.Object { + fake.forRestartActionMutex.RLock() + defer fake.forRestartActionMutex.RUnlock() + argsForCall := fake.forRestartActionArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *RestartManager) ForRestartActionReturns(result1 error) { + fake.forRestartActionMutex.Lock() + defer fake.forRestartActionMutex.Unlock() + fake.ForRestartActionStub = nil + fake.forRestartActionReturns = struct { + result1 error + }{result1} +} + +func (fake *RestartManager) ForRestartActionReturnsOnCall(i int, result1 error) { + fake.forRestartActionMutex.Lock() + defer fake.forRestartActionMutex.Unlock() + fake.ForRestartActionStub = nil + if fake.forRestartActionReturnsOnCall == nil { + fake.forRestartActionReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.forRestartActionReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *RestartManager) ForTLSReenroll(arg1 v1.Object) error { + fake.forTLSReenrollMutex.Lock() + ret, specificReturn := fake.forTLSReenrollReturnsOnCall[len(fake.forTLSReenrollArgsForCall)] + fake.forTLSReenrollArgsForCall = append(fake.forTLSReenrollArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.ForTLSReenrollStub + fakeReturns := fake.forTLSReenrollReturns + fake.recordInvocation("ForTLSReenroll", []interface{}{arg1}) + fake.forTLSReenrollMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *RestartManager) ForTLSReenrollCallCount() int { + fake.forTLSReenrollMutex.RLock() + defer fake.forTLSReenrollMutex.RUnlock() + return len(fake.forTLSReenrollArgsForCall) +} + +func (fake *RestartManager) ForTLSReenrollCalls(stub func(v1.Object) error) { + fake.forTLSReenrollMutex.Lock() + defer fake.forTLSReenrollMutex.Unlock() + fake.ForTLSReenrollStub = stub +} + +func (fake *RestartManager) ForTLSReenrollArgsForCall(i int) v1.Object { + fake.forTLSReenrollMutex.RLock() + defer fake.forTLSReenrollMutex.RUnlock() + argsForCall := fake.forTLSReenrollArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *RestartManager) ForTLSReenrollReturns(result1 error) { + fake.forTLSReenrollMutex.Lock() + defer fake.forTLSReenrollMutex.Unlock() + fake.ForTLSReenrollStub = nil + fake.forTLSReenrollReturns = struct { + result1 error + }{result1} +} + +func (fake *RestartManager) ForTLSReenrollReturnsOnCall(i int, result1 error) { + fake.forTLSReenrollMutex.Lock() + defer fake.forTLSReenrollMutex.Unlock() + fake.ForTLSReenrollStub = nil + if fake.forTLSReenrollReturnsOnCall == nil { + 
fake.forTLSReenrollReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.forTLSReenrollReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *RestartManager) TriggerIfNeeded(arg1 restart.Instance) error { + fake.triggerIfNeededMutex.Lock() + ret, specificReturn := fake.triggerIfNeededReturnsOnCall[len(fake.triggerIfNeededArgsForCall)] + fake.triggerIfNeededArgsForCall = append(fake.triggerIfNeededArgsForCall, struct { + arg1 restart.Instance + }{arg1}) + stub := fake.TriggerIfNeededStub + fakeReturns := fake.triggerIfNeededReturns + fake.recordInvocation("TriggerIfNeeded", []interface{}{arg1}) + fake.triggerIfNeededMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *RestartManager) TriggerIfNeededCallCount() int { + fake.triggerIfNeededMutex.RLock() + defer fake.triggerIfNeededMutex.RUnlock() + return len(fake.triggerIfNeededArgsForCall) +} + +func (fake *RestartManager) TriggerIfNeededCalls(stub func(restart.Instance) error) { + fake.triggerIfNeededMutex.Lock() + defer fake.triggerIfNeededMutex.Unlock() + fake.TriggerIfNeededStub = stub +} + +func (fake *RestartManager) TriggerIfNeededArgsForCall(i int) restart.Instance { + fake.triggerIfNeededMutex.RLock() + defer fake.triggerIfNeededMutex.RUnlock() + argsForCall := fake.triggerIfNeededArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *RestartManager) TriggerIfNeededReturns(result1 error) { + fake.triggerIfNeededMutex.Lock() + defer fake.triggerIfNeededMutex.Unlock() + fake.TriggerIfNeededStub = nil + fake.triggerIfNeededReturns = struct { + result1 error + }{result1} +} + +func (fake *RestartManager) TriggerIfNeededReturnsOnCall(i int, result1 error) { + fake.triggerIfNeededMutex.Lock() + defer fake.triggerIfNeededMutex.Unlock() + fake.TriggerIfNeededStub = nil + if fake.triggerIfNeededReturnsOnCall == nil { + fake.triggerIfNeededReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.triggerIfNeededReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *RestartManager) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.forConfigOverrideMutex.RLock() + defer fake.forConfigOverrideMutex.RUnlock() + fake.forRestartActionMutex.RLock() + defer fake.forRestartActionMutex.RUnlock() + fake.forTLSReenrollMutex.RLock() + defer fake.forTLSReenrollMutex.RUnlock() + fake.triggerIfNeededMutex.RLock() + defer fake.triggerIfNeededMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *RestartManager) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ baseca.RestartManager = new(RestartManager) diff --git a/pkg/offering/base/ca/mocks/update.go b/pkg/offering/base/ca/mocks/update.go new file mode 100644 index 00000000..4ac87ff9 --- /dev/null +++ b/pkg/offering/base/ca/mocks/update.go @@ -0,0 +1,752 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
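+// NOTE: this fake stands in for the baseca.Update interface consumed by
+// HandleEnrollmentCAInit and HandleTLSCAInit; when the CA crypto secret already
+// exists, its boolean stubs decide whether the config is re-initialized, as in
+// initialize_test.go. Assumed sketch (illustrative names):
+//
+//   update := &mocks.Update{}
+//   update.CAOverridesUpdatedReturns(true) // drive HandleEnrollmentCAInit down the update path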
+package mocks + +import ( + "sync" + + baseca "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/ca" +) + +type Update struct { + CACryptoCreatedStub func() bool + cACryptoCreatedMutex sync.RWMutex + cACryptoCreatedArgsForCall []struct { + } + cACryptoCreatedReturns struct { + result1 bool + } + cACryptoCreatedReturnsOnCall map[int]struct { + result1 bool + } + CACryptoUpdatedStub func() bool + cACryptoUpdatedMutex sync.RWMutex + cACryptoUpdatedArgsForCall []struct { + } + cACryptoUpdatedReturns struct { + result1 bool + } + cACryptoUpdatedReturnsOnCall map[int]struct { + result1 bool + } + CAOverridesUpdatedStub func() bool + cAOverridesUpdatedMutex sync.RWMutex + cAOverridesUpdatedArgsForCall []struct { + } + cAOverridesUpdatedReturns struct { + result1 bool + } + cAOverridesUpdatedReturnsOnCall map[int]struct { + result1 bool + } + CATagUpdatedStub func() bool + cATagUpdatedMutex sync.RWMutex + cATagUpdatedArgsForCall []struct { + } + cATagUpdatedReturns struct { + result1 bool + } + cATagUpdatedReturnsOnCall map[int]struct { + result1 bool + } + ConfigOverridesUpdatedStub func() bool + configOverridesUpdatedMutex sync.RWMutex + configOverridesUpdatedArgsForCall []struct { + } + configOverridesUpdatedReturns struct { + result1 bool + } + configOverridesUpdatedReturnsOnCall map[int]struct { + result1 bool + } + FabricVersionUpdatedStub func() bool + fabricVersionUpdatedMutex sync.RWMutex + fabricVersionUpdatedArgsForCall []struct { + } + fabricVersionUpdatedReturns struct { + result1 bool + } + fabricVersionUpdatedReturnsOnCall map[int]struct { + result1 bool + } + ImagesUpdatedStub func() bool + imagesUpdatedMutex sync.RWMutex + imagesUpdatedArgsForCall []struct { + } + imagesUpdatedReturns struct { + result1 bool + } + imagesUpdatedReturnsOnCall map[int]struct { + result1 bool + } + RenewTLSCertStub func() bool + renewTLSCertMutex sync.RWMutex + renewTLSCertArgsForCall []struct { + } + renewTLSCertReturns struct { + result1 bool + } + renewTLSCertReturnsOnCall map[int]struct { + result1 bool + } + RestartNeededStub func() bool + restartNeededMutex sync.RWMutex + restartNeededArgsForCall []struct { + } + restartNeededReturns struct { + result1 bool + } + restartNeededReturnsOnCall map[int]struct { + result1 bool + } + SpecUpdatedStub func() bool + specUpdatedMutex sync.RWMutex + specUpdatedArgsForCall []struct { + } + specUpdatedReturns struct { + result1 bool + } + specUpdatedReturnsOnCall map[int]struct { + result1 bool + } + TLSCAOverridesUpdatedStub func() bool + tLSCAOverridesUpdatedMutex sync.RWMutex + tLSCAOverridesUpdatedArgsForCall []struct { + } + tLSCAOverridesUpdatedReturns struct { + result1 bool + } + tLSCAOverridesUpdatedReturnsOnCall map[int]struct { + result1 bool + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *Update) CACryptoCreated() bool { + fake.cACryptoCreatedMutex.Lock() + ret, specificReturn := fake.cACryptoCreatedReturnsOnCall[len(fake.cACryptoCreatedArgsForCall)] + fake.cACryptoCreatedArgsForCall = append(fake.cACryptoCreatedArgsForCall, struct { + }{}) + stub := fake.CACryptoCreatedStub + fakeReturns := fake.cACryptoCreatedReturns + fake.recordInvocation("CACryptoCreated", []interface{}{}) + fake.cACryptoCreatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) CACryptoCreatedCallCount() int { + fake.cACryptoCreatedMutex.RLock() + defer fake.cACryptoCreatedMutex.RUnlock() + return 
len(fake.cACryptoCreatedArgsForCall) +} + +func (fake *Update) CACryptoCreatedCalls(stub func() bool) { + fake.cACryptoCreatedMutex.Lock() + defer fake.cACryptoCreatedMutex.Unlock() + fake.CACryptoCreatedStub = stub +} + +func (fake *Update) CACryptoCreatedReturns(result1 bool) { + fake.cACryptoCreatedMutex.Lock() + defer fake.cACryptoCreatedMutex.Unlock() + fake.CACryptoCreatedStub = nil + fake.cACryptoCreatedReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) CACryptoCreatedReturnsOnCall(i int, result1 bool) { + fake.cACryptoCreatedMutex.Lock() + defer fake.cACryptoCreatedMutex.Unlock() + fake.CACryptoCreatedStub = nil + if fake.cACryptoCreatedReturnsOnCall == nil { + fake.cACryptoCreatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.cACryptoCreatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) CACryptoUpdated() bool { + fake.cACryptoUpdatedMutex.Lock() + ret, specificReturn := fake.cACryptoUpdatedReturnsOnCall[len(fake.cACryptoUpdatedArgsForCall)] + fake.cACryptoUpdatedArgsForCall = append(fake.cACryptoUpdatedArgsForCall, struct { + }{}) + stub := fake.CACryptoUpdatedStub + fakeReturns := fake.cACryptoUpdatedReturns + fake.recordInvocation("CACryptoUpdated", []interface{}{}) + fake.cACryptoUpdatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) CACryptoUpdatedCallCount() int { + fake.cACryptoUpdatedMutex.RLock() + defer fake.cACryptoUpdatedMutex.RUnlock() + return len(fake.cACryptoUpdatedArgsForCall) +} + +func (fake *Update) CACryptoUpdatedCalls(stub func() bool) { + fake.cACryptoUpdatedMutex.Lock() + defer fake.cACryptoUpdatedMutex.Unlock() + fake.CACryptoUpdatedStub = stub +} + +func (fake *Update) CACryptoUpdatedReturns(result1 bool) { + fake.cACryptoUpdatedMutex.Lock() + defer fake.cACryptoUpdatedMutex.Unlock() + fake.CACryptoUpdatedStub = nil + fake.cACryptoUpdatedReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) CACryptoUpdatedReturnsOnCall(i int, result1 bool) { + fake.cACryptoUpdatedMutex.Lock() + defer fake.cACryptoUpdatedMutex.Unlock() + fake.CACryptoUpdatedStub = nil + if fake.cACryptoUpdatedReturnsOnCall == nil { + fake.cACryptoUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.cACryptoUpdatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) CAOverridesUpdated() bool { + fake.cAOverridesUpdatedMutex.Lock() + ret, specificReturn := fake.cAOverridesUpdatedReturnsOnCall[len(fake.cAOverridesUpdatedArgsForCall)] + fake.cAOverridesUpdatedArgsForCall = append(fake.cAOverridesUpdatedArgsForCall, struct { + }{}) + stub := fake.CAOverridesUpdatedStub + fakeReturns := fake.cAOverridesUpdatedReturns + fake.recordInvocation("CAOverridesUpdated", []interface{}{}) + fake.cAOverridesUpdatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) CAOverridesUpdatedCallCount() int { + fake.cAOverridesUpdatedMutex.RLock() + defer fake.cAOverridesUpdatedMutex.RUnlock() + return len(fake.cAOverridesUpdatedArgsForCall) +} + +func (fake *Update) CAOverridesUpdatedCalls(stub func() bool) { + fake.cAOverridesUpdatedMutex.Lock() + defer fake.cAOverridesUpdatedMutex.Unlock() + fake.CAOverridesUpdatedStub = stub +} + +func (fake *Update) CAOverridesUpdatedReturns(result1 bool) { + fake.cAOverridesUpdatedMutex.Lock() + defer 
fake.cAOverridesUpdatedMutex.Unlock() + fake.CAOverridesUpdatedStub = nil + fake.cAOverridesUpdatedReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) CAOverridesUpdatedReturnsOnCall(i int, result1 bool) { + fake.cAOverridesUpdatedMutex.Lock() + defer fake.cAOverridesUpdatedMutex.Unlock() + fake.CAOverridesUpdatedStub = nil + if fake.cAOverridesUpdatedReturnsOnCall == nil { + fake.cAOverridesUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.cAOverridesUpdatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) CATagUpdated() bool { + fake.cATagUpdatedMutex.Lock() + ret, specificReturn := fake.cATagUpdatedReturnsOnCall[len(fake.cATagUpdatedArgsForCall)] + fake.cATagUpdatedArgsForCall = append(fake.cATagUpdatedArgsForCall, struct { + }{}) + stub := fake.CATagUpdatedStub + fakeReturns := fake.cATagUpdatedReturns + fake.recordInvocation("CATagUpdated", []interface{}{}) + fake.cATagUpdatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) CATagUpdatedCallCount() int { + fake.cATagUpdatedMutex.RLock() + defer fake.cATagUpdatedMutex.RUnlock() + return len(fake.cATagUpdatedArgsForCall) +} + +func (fake *Update) CATagUpdatedCalls(stub func() bool) { + fake.cATagUpdatedMutex.Lock() + defer fake.cATagUpdatedMutex.Unlock() + fake.CATagUpdatedStub = stub +} + +func (fake *Update) CATagUpdatedReturns(result1 bool) { + fake.cATagUpdatedMutex.Lock() + defer fake.cATagUpdatedMutex.Unlock() + fake.CATagUpdatedStub = nil + fake.cATagUpdatedReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) CATagUpdatedReturnsOnCall(i int, result1 bool) { + fake.cATagUpdatedMutex.Lock() + defer fake.cATagUpdatedMutex.Unlock() + fake.CATagUpdatedStub = nil + if fake.cATagUpdatedReturnsOnCall == nil { + fake.cATagUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.cATagUpdatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) ConfigOverridesUpdated() bool { + fake.configOverridesUpdatedMutex.Lock() + ret, specificReturn := fake.configOverridesUpdatedReturnsOnCall[len(fake.configOverridesUpdatedArgsForCall)] + fake.configOverridesUpdatedArgsForCall = append(fake.configOverridesUpdatedArgsForCall, struct { + }{}) + stub := fake.ConfigOverridesUpdatedStub + fakeReturns := fake.configOverridesUpdatedReturns + fake.recordInvocation("ConfigOverridesUpdated", []interface{}{}) + fake.configOverridesUpdatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) ConfigOverridesUpdatedCallCount() int { + fake.configOverridesUpdatedMutex.RLock() + defer fake.configOverridesUpdatedMutex.RUnlock() + return len(fake.configOverridesUpdatedArgsForCall) +} + +func (fake *Update) ConfigOverridesUpdatedCalls(stub func() bool) { + fake.configOverridesUpdatedMutex.Lock() + defer fake.configOverridesUpdatedMutex.Unlock() + fake.ConfigOverridesUpdatedStub = stub +} + +func (fake *Update) ConfigOverridesUpdatedReturns(result1 bool) { + fake.configOverridesUpdatedMutex.Lock() + defer fake.configOverridesUpdatedMutex.Unlock() + fake.ConfigOverridesUpdatedStub = nil + fake.configOverridesUpdatedReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) ConfigOverridesUpdatedReturnsOnCall(i int, result1 bool) { + fake.configOverridesUpdatedMutex.Lock() + defer 
fake.configOverridesUpdatedMutex.Unlock() + fake.ConfigOverridesUpdatedStub = nil + if fake.configOverridesUpdatedReturnsOnCall == nil { + fake.configOverridesUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.configOverridesUpdatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) FabricVersionUpdated() bool { + fake.fabricVersionUpdatedMutex.Lock() + ret, specificReturn := fake.fabricVersionUpdatedReturnsOnCall[len(fake.fabricVersionUpdatedArgsForCall)] + fake.fabricVersionUpdatedArgsForCall = append(fake.fabricVersionUpdatedArgsForCall, struct { + }{}) + stub := fake.FabricVersionUpdatedStub + fakeReturns := fake.fabricVersionUpdatedReturns + fake.recordInvocation("FabricVersionUpdated", []interface{}{}) + fake.fabricVersionUpdatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) FabricVersionUpdatedCallCount() int { + fake.fabricVersionUpdatedMutex.RLock() + defer fake.fabricVersionUpdatedMutex.RUnlock() + return len(fake.fabricVersionUpdatedArgsForCall) +} + +func (fake *Update) FabricVersionUpdatedCalls(stub func() bool) { + fake.fabricVersionUpdatedMutex.Lock() + defer fake.fabricVersionUpdatedMutex.Unlock() + fake.FabricVersionUpdatedStub = stub +} + +func (fake *Update) FabricVersionUpdatedReturns(result1 bool) { + fake.fabricVersionUpdatedMutex.Lock() + defer fake.fabricVersionUpdatedMutex.Unlock() + fake.FabricVersionUpdatedStub = nil + fake.fabricVersionUpdatedReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) FabricVersionUpdatedReturnsOnCall(i int, result1 bool) { + fake.fabricVersionUpdatedMutex.Lock() + defer fake.fabricVersionUpdatedMutex.Unlock() + fake.FabricVersionUpdatedStub = nil + if fake.fabricVersionUpdatedReturnsOnCall == nil { + fake.fabricVersionUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.fabricVersionUpdatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) ImagesUpdated() bool { + fake.imagesUpdatedMutex.Lock() + ret, specificReturn := fake.imagesUpdatedReturnsOnCall[len(fake.imagesUpdatedArgsForCall)] + fake.imagesUpdatedArgsForCall = append(fake.imagesUpdatedArgsForCall, struct { + }{}) + stub := fake.ImagesUpdatedStub + fakeReturns := fake.imagesUpdatedReturns + fake.recordInvocation("ImagesUpdated", []interface{}{}) + fake.imagesUpdatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) ImagesUpdatedCallCount() int { + fake.imagesUpdatedMutex.RLock() + defer fake.imagesUpdatedMutex.RUnlock() + return len(fake.imagesUpdatedArgsForCall) +} + +func (fake *Update) ImagesUpdatedCalls(stub func() bool) { + fake.imagesUpdatedMutex.Lock() + defer fake.imagesUpdatedMutex.Unlock() + fake.ImagesUpdatedStub = stub +} + +func (fake *Update) ImagesUpdatedReturns(result1 bool) { + fake.imagesUpdatedMutex.Lock() + defer fake.imagesUpdatedMutex.Unlock() + fake.ImagesUpdatedStub = nil + fake.imagesUpdatedReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) ImagesUpdatedReturnsOnCall(i int, result1 bool) { + fake.imagesUpdatedMutex.Lock() + defer fake.imagesUpdatedMutex.Unlock() + fake.ImagesUpdatedStub = nil + if fake.imagesUpdatedReturnsOnCall == nil { + fake.imagesUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.imagesUpdatedReturnsOnCall[i] = struct { + result1 bool + }{result1} 
+} + +func (fake *Update) RenewTLSCert() bool { + fake.renewTLSCertMutex.Lock() + ret, specificReturn := fake.renewTLSCertReturnsOnCall[len(fake.renewTLSCertArgsForCall)] + fake.renewTLSCertArgsForCall = append(fake.renewTLSCertArgsForCall, struct { + }{}) + stub := fake.RenewTLSCertStub + fakeReturns := fake.renewTLSCertReturns + fake.recordInvocation("RenewTLSCert", []interface{}{}) + fake.renewTLSCertMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) RenewTLSCertCallCount() int { + fake.renewTLSCertMutex.RLock() + defer fake.renewTLSCertMutex.RUnlock() + return len(fake.renewTLSCertArgsForCall) +} + +func (fake *Update) RenewTLSCertCalls(stub func() bool) { + fake.renewTLSCertMutex.Lock() + defer fake.renewTLSCertMutex.Unlock() + fake.RenewTLSCertStub = stub +} + +func (fake *Update) RenewTLSCertReturns(result1 bool) { + fake.renewTLSCertMutex.Lock() + defer fake.renewTLSCertMutex.Unlock() + fake.RenewTLSCertStub = nil + fake.renewTLSCertReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) RenewTLSCertReturnsOnCall(i int, result1 bool) { + fake.renewTLSCertMutex.Lock() + defer fake.renewTLSCertMutex.Unlock() + fake.RenewTLSCertStub = nil + if fake.renewTLSCertReturnsOnCall == nil { + fake.renewTLSCertReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.renewTLSCertReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) RestartNeeded() bool { + fake.restartNeededMutex.Lock() + ret, specificReturn := fake.restartNeededReturnsOnCall[len(fake.restartNeededArgsForCall)] + fake.restartNeededArgsForCall = append(fake.restartNeededArgsForCall, struct { + }{}) + stub := fake.RestartNeededStub + fakeReturns := fake.restartNeededReturns + fake.recordInvocation("RestartNeeded", []interface{}{}) + fake.restartNeededMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) RestartNeededCallCount() int { + fake.restartNeededMutex.RLock() + defer fake.restartNeededMutex.RUnlock() + return len(fake.restartNeededArgsForCall) +} + +func (fake *Update) RestartNeededCalls(stub func() bool) { + fake.restartNeededMutex.Lock() + defer fake.restartNeededMutex.Unlock() + fake.RestartNeededStub = stub +} + +func (fake *Update) RestartNeededReturns(result1 bool) { + fake.restartNeededMutex.Lock() + defer fake.restartNeededMutex.Unlock() + fake.RestartNeededStub = nil + fake.restartNeededReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) RestartNeededReturnsOnCall(i int, result1 bool) { + fake.restartNeededMutex.Lock() + defer fake.restartNeededMutex.Unlock() + fake.RestartNeededStub = nil + if fake.restartNeededReturnsOnCall == nil { + fake.restartNeededReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.restartNeededReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) SpecUpdated() bool { + fake.specUpdatedMutex.Lock() + ret, specificReturn := fake.specUpdatedReturnsOnCall[len(fake.specUpdatedArgsForCall)] + fake.specUpdatedArgsForCall = append(fake.specUpdatedArgsForCall, struct { + }{}) + stub := fake.SpecUpdatedStub + fakeReturns := fake.specUpdatedReturns + fake.recordInvocation("SpecUpdated", []interface{}{}) + fake.specUpdatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) 
SpecUpdatedCallCount() int { + fake.specUpdatedMutex.RLock() + defer fake.specUpdatedMutex.RUnlock() + return len(fake.specUpdatedArgsForCall) +} + +func (fake *Update) SpecUpdatedCalls(stub func() bool) { + fake.specUpdatedMutex.Lock() + defer fake.specUpdatedMutex.Unlock() + fake.SpecUpdatedStub = stub +} + +func (fake *Update) SpecUpdatedReturns(result1 bool) { + fake.specUpdatedMutex.Lock() + defer fake.specUpdatedMutex.Unlock() + fake.SpecUpdatedStub = nil + fake.specUpdatedReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) SpecUpdatedReturnsOnCall(i int, result1 bool) { + fake.specUpdatedMutex.Lock() + defer fake.specUpdatedMutex.Unlock() + fake.SpecUpdatedStub = nil + if fake.specUpdatedReturnsOnCall == nil { + fake.specUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.specUpdatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) TLSCAOverridesUpdated() bool { + fake.tLSCAOverridesUpdatedMutex.Lock() + ret, specificReturn := fake.tLSCAOverridesUpdatedReturnsOnCall[len(fake.tLSCAOverridesUpdatedArgsForCall)] + fake.tLSCAOverridesUpdatedArgsForCall = append(fake.tLSCAOverridesUpdatedArgsForCall, struct { + }{}) + stub := fake.TLSCAOverridesUpdatedStub + fakeReturns := fake.tLSCAOverridesUpdatedReturns + fake.recordInvocation("TLSCAOverridesUpdated", []interface{}{}) + fake.tLSCAOverridesUpdatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) TLSCAOverridesUpdatedCallCount() int { + fake.tLSCAOverridesUpdatedMutex.RLock() + defer fake.tLSCAOverridesUpdatedMutex.RUnlock() + return len(fake.tLSCAOverridesUpdatedArgsForCall) +} + +func (fake *Update) TLSCAOverridesUpdatedCalls(stub func() bool) { + fake.tLSCAOverridesUpdatedMutex.Lock() + defer fake.tLSCAOverridesUpdatedMutex.Unlock() + fake.TLSCAOverridesUpdatedStub = stub +} + +func (fake *Update) TLSCAOverridesUpdatedReturns(result1 bool) { + fake.tLSCAOverridesUpdatedMutex.Lock() + defer fake.tLSCAOverridesUpdatedMutex.Unlock() + fake.TLSCAOverridesUpdatedStub = nil + fake.tLSCAOverridesUpdatedReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) TLSCAOverridesUpdatedReturnsOnCall(i int, result1 bool) { + fake.tLSCAOverridesUpdatedMutex.Lock() + defer fake.tLSCAOverridesUpdatedMutex.Unlock() + fake.TLSCAOverridesUpdatedStub = nil + if fake.tLSCAOverridesUpdatedReturnsOnCall == nil { + fake.tLSCAOverridesUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.tLSCAOverridesUpdatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.cACryptoCreatedMutex.RLock() + defer fake.cACryptoCreatedMutex.RUnlock() + fake.cACryptoUpdatedMutex.RLock() + defer fake.cACryptoUpdatedMutex.RUnlock() + fake.cAOverridesUpdatedMutex.RLock() + defer fake.cAOverridesUpdatedMutex.RUnlock() + fake.cATagUpdatedMutex.RLock() + defer fake.cATagUpdatedMutex.RUnlock() + fake.configOverridesUpdatedMutex.RLock() + defer fake.configOverridesUpdatedMutex.RUnlock() + fake.fabricVersionUpdatedMutex.RLock() + defer fake.fabricVersionUpdatedMutex.RUnlock() + fake.imagesUpdatedMutex.RLock() + defer fake.imagesUpdatedMutex.RUnlock() + fake.renewTLSCertMutex.RLock() + defer fake.renewTLSCertMutex.RUnlock() + fake.restartNeededMutex.RLock() + defer fake.restartNeededMutex.RUnlock() + 
fake.specUpdatedMutex.RLock() + defer fake.specUpdatedMutex.RUnlock() + fake.tLSCAOverridesUpdatedMutex.RLock() + defer fake.tLSCAOverridesUpdatedMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *Update) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ baseca.Update = new(Update) diff --git a/pkg/offering/base/ca/override/deployment.go b/pkg/offering/base/ca/override/deployment.go new file mode 100644 index 00000000..4e9077da --- /dev/null +++ b/pkg/offering/base/ca/override/deployment.go @@ -0,0 +1,369 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override + +import ( + "encoding/json" + "fmt" + "path/filepath" + + "github.com/pkg/errors" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + cav1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/ca/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/container" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/deployment" + dep "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/deployment" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/serviceaccount" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Container names +const ( + INIT = "init" + CA = "ca" + HSMCLIENT = "hsm-client" +) + +func (o *Override) Deployment(object v1.Object, deployment *appsv1.Deployment, action resources.Action) error { + instance := object.(*current.IBPCA) + switch action { + case resources.Create: + return o.CreateDeployment(instance, deployment) + case resources.Update: + return o.UpdateDeployment(instance, deployment) + } + + return nil +} + +func (o *Override) CreateDeployment(instance *current.IBPCA, k8sDep *appsv1.Deployment) error { + var err error + + if !instance.Spec.License.Accept { + return errors.New("user must accept license before continuing") + } + + deployment := dep.New(k8sDep) + + name := instance.GetName() + deployment.Spec.Template.Spec.ServiceAccountName = serviceaccount.GetName(name) + err = o.CommonDeployment(instance, deployment) + if err != nil { + return err + } + + caCont, err := deployment.GetContainer(CA) + if err != nil { + return errors.New("ca container not found in deployment spec") + } + 
initCont, err := deployment.GetContainer(INIT) + if err != nil { + return errors.New("init container not found in deployment spec") + } + + deployment.SetImagePullSecrets(instance.Spec.ImagePullSecrets) + + if !o.IsPostgres(instance) { + claimName := instance.Name + "-pvc" + if instance.Spec.CustomNames.PVC.CA != "" { + claimName = instance.Spec.CustomNames.PVC.CA + } + deployment.AppendPVCVolumeIfMissing("fabric-ca", claimName) + + initCont.AppendVolumeMountWithSubPathIfMissing("fabric-ca", "/data", "fabric-ca-server") + caCont.AppendVolumeMountWithSubPathIfMissing("fabric-ca", "/data", "fabric-ca-server") + } else { + initCont.AppendVolumeMountIfMissing("shared", "/data") + caCont.AppendVolumeMountIfMissing("shared", "/data") + } + + deployment.AppendSecretVolumeIfMissing("ca-crypto", instance.Name+"-ca-crypto") + deployment.AppendSecretVolumeIfMissing("tlsca-crypto", instance.Name+"-tlsca-crypto") + deployment.AppendConfigMapVolumeIfMissing("ca-config", instance.Name+"-ca-config") + deployment.AppendConfigMapVolumeIfMissing("tlsca-config", instance.Name+"-tlsca-config") + deployment.SetAffinity(o.GetAffinity(instance)) + + if instance.UsingHSMProxy() { + caCont.AppendEnvIfMissing("PKCS11_PROXY_SOCKET", instance.Spec.HSM.PKCS11Endpoint) + } else if instance.IsHSMEnabled() { + hsmConfig, err := config.ReadHSMConfig(o.Client, instance) + if err != nil { + return errors.Wrapf(err, "failed to apply hsm settings to '%s' deployment", instance.GetName()) + } + + hsmSettings(instance, hsmConfig, caCont, deployment) + } + + return nil +} + +func (o *Override) UpdateDeployment(instance *current.IBPCA, k8sDep *appsv1.Deployment) error { + deployment := dep.New(k8sDep) + err := o.CommonDeployment(instance, deployment) + if err != nil { + return err + } + + if instance.UsingHSMProxy() { + caCont := deployment.MustGetContainer(CA) + caCont.UpdateEnv("PKCS11_PROXY_SOCKET", instance.Spec.HSM.PKCS11Endpoint) + deployment.UpdateContainer(caCont) + } else if instance.IsHSMEnabled() { + hsmInitCont := deployment.MustGetContainer(HSMCLIENT) + image := instance.Spec.Images + if image != nil { + hsmInitCont.SetImage(image.HSMImage, image.HSMTag) + } + } + + return nil +} + +func (o *Override) CommonDeployment(instance *current.IBPCA, deployment *dep.Deployment) error { + caCont := deployment.MustGetContainer(CA) + initCont := deployment.MustGetContainer(INIT) + + if instance.Spec.CAResourcesSet() { + err := caCont.UpdateResources(instance.Spec.Resources.CA) + if err != nil { + return errors.Wrap(err, "update resources for ca failed") + } + } + + if instance.Spec.InitResourcesSet() { + err := initCont.UpdateResources(instance.Spec.Resources.Init) + if err != nil { + return errors.Wrap(err, "update resources for init failed") + } + } + + image := instance.Spec.Images + if image != nil { + caCont.SetImage(image.CAImage, image.CATag) + initCont.SetImage(image.CAInitImage, image.CAInitTag) + } + + if o.IsPostgres(instance) { + deployment.SetStrategy(appsv1.RollingUpdateDeploymentStrategyType) + } + + // TODO: Find a clean way to check for valid config other than the nested if/else statements + if instance.Spec.Replicas != nil { + if *instance.Spec.Replicas > 1 { + err := o.ValidateConfigOverride(instance.Spec.ConfigOverride) + if err != nil { + return err + } + } + + deployment.SetReplicas(instance.Spec.Replicas) + } + + return nil +} + +func (o *Override) ValidateConfigOverride(configOverride *current.ConfigOverride) error { + var byteArray *[]byte + if configOverride == nil { + return errors.New("Failed to 
provide override configuration to support greater than 1 replicas") + } + + if configOverride.CA != nil { + err := o.ValidateServerConfig(&configOverride.CA.Raw, "CA") + if err != nil { + return err + } + } else { // if it is nil call with empty bytearray + err := o.ValidateServerConfig(byteArray, "CA") + if err != nil { + return err + } + } + + if configOverride.TLSCA != nil { + err := o.ValidateServerConfig(&configOverride.TLSCA.Raw, "TLSCA") + if err != nil { + return err + } + } else { // if it is nil call with empty bytearray + err := o.ValidateServerConfig(byteArray, "TLSCA") + if err != nil { + return err + } + } + + return nil +} + +func (o *Override) ValidateServerConfig(byteArray *[]byte, configType string) error { + if byteArray == nil { + return errors.New(fmt.Sprintf("Failed to provide database configuration for %s to support greater than 1 replicas", configType)) + } + + overrides := &cav1.ServerConfig{} + err := json.Unmarshal(*byteArray, overrides) + if err != nil { + return err + } + + if overrides.DB != nil { + if overrides.DB.Type != "postgres" { + return errors.New(fmt.Sprintf("DB Type in %s config override should be `postgres` to allow replicas > 1", configType)) + } + + if overrides.DB.Datasource == "" { + return errors.New(fmt.Sprintf("Datasource in %s config override should not be empty to allow replicas > 1", configType)) + } + } + + return nil +} + +func hsmInitContainer(instance *current.IBPCA, hsmConfig *config.HSMConfig) *container.Container { + hsmLibraryPath := hsmConfig.Library.FilePath + hsmLibraryName := filepath.Base(hsmLibraryPath) + + f := false + user := int64(0) + mountPath := "/shared" + cont := &container.Container{ + Container: &corev1.Container{ + Name: HSMCLIENT, + Image: fmt.Sprintf("%s:%s", instance.Spec.Images.HSMImage, instance.Spec.Images.HSMTag), + ImagePullPolicy: corev1.PullAlways, + Command: []string{ + "sh", + "-c", + fmt.Sprintf("mkdir -p %s/hsm && dst=\"%s/hsm/%s\" && echo \"Copying %s to ${dst}\" && mkdir -p $(dirname $dst) && cp -r %s $dst", mountPath, mountPath, hsmLibraryName, hsmLibraryPath, hsmLibraryPath), + }, + SecurityContext: &corev1.SecurityContext{ + RunAsUser: &user, + RunAsNonRoot: &f, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "shared", + MountPath: mountPath, + }, + }, + Resources: instance.GetResource("init"), + }, + } + + return cont +} + +func hsmSettings(instance *current.IBPCA, hsmConfig *config.HSMConfig, caCont container.Container, deployment *deployment.Deployment) { + caCont.Command = []string{ + "sh", + "-c", + "mkdir -p /data/tlsca && cp /config/tlsca/fabric-ca-server-config.yaml /data/tlsca && mkdir -p /data/ca && cp /config/ca/fabric-ca-server-config.yaml /data/ca && fabric-ca-server start --home /data/ca", + } + + // Add volumes from HSM config to deployment container + for _, v := range hsmConfig.GetVolumes() { + deployment.AppendVolumeIfMissing(v) + } + + // Add volume mounts from HSM config to CA container + for _, vm := range hsmConfig.GetVolumeMounts() { + caCont.AppendVolumeMountStructIfMissing(vm) + } + + // Add environment variables from HSM config to CA container + for _, env := range hsmConfig.GetEnvs() { + caCont.AppendEnvStructIfMissing(env) + } + + caCont.AppendVolumeMountWithSubPathIfMissing("shared", "/hsm/lib", "hsm") + + // If a pull secret is required to pull HSM library image, update the deployment's image pull secrets + if hsmConfig.Library.Auth != nil { + deployment.Spec.Template.Spec.ImagePullSecrets = util.AppendPullSecretIfMissing( + 
deployment.Spec.Template.Spec.ImagePullSecrets, + hsmConfig.Library.Auth.ImagePullSecret, + ) + } + + // Add HSM init container to deployment; the init container is responsible for copying the HSM + // client library to the path expected by the CA + deployment.AddInitContainer(*hsmInitContainer(instance, hsmConfig)) + + // If daemon settings are configured in HSM config, create a sidecar that runs the daemon image + if hsmConfig.Daemon != nil { + hsmDaemonSettings(instance, hsmConfig, caCont, deployment) + } +} + +func hsmDaemonSettings(instance *current.IBPCA, hsmConfig *config.HSMConfig, caCont container.Container, deployment *deployment.Deployment) { + // Unable to launch daemon if not running in privileged mode + t := true + caCont.SecurityContext.Privileged = &t + caCont.SecurityContext.AllowPrivilegeEscalation = &t + + // Update command in deployment to ensure that the daemon is running before starting the CA + caCont.Command = []string{ + "sh", + "-c", + config.DAEMON_CHECK_CMD + " && mkdir -p /data/tlsca && cp /config/tlsca/fabric-ca-server-config.yaml /data/tlsca && mkdir -p /data/ca && cp /config/ca/fabric-ca-server-config.yaml /data/ca && fabric-ca-server start --home /data/ca", + } + + // This is the shared volume where the file 'pkcsslotd-luanched' is touched to let + // other containers know that the daemon has successfully launched. + caCont.AppendVolumeMountIfMissing("shared", "/shared") + + pvcVolumeName := "fabric-ca" + // Certain token information needs to be stored in a persistent store; the administrator + // responsible for configuring HSM sets the HSM config to point to the path where the PVC + // needs to be mounted. + var pvcMount *corev1.VolumeMount + for _, vm := range hsmConfig.MountPaths { + if vm.UsePVC { + pvcMount = &corev1.VolumeMount{ + Name: pvcVolumeName, + MountPath: vm.MountPath, + } + } + } + + // If a pull secret is required to pull the daemon image, update the deployment's image pull secrets + if hsmConfig.Daemon.Auth != nil { + deployment.Spec.Template.Spec.ImagePullSecrets = util.AppendPullSecretIfMissing( + deployment.Spec.Template.Spec.ImagePullSecrets, + hsmConfig.Daemon.Auth.ImagePullSecret, + ) + } + + // Add daemon container to the deployment + config.AddDaemonContainer(hsmConfig, deployment, instance.GetResource(current.HSMDAEMON), pvcMount) + + // If a PVC mount has been configured in HSM config, set the volume mount on the CA container + // and add the PVC volume to the deployment if missing + if pvcMount != nil { + caCont.AppendVolumeMountStructIfMissing(*pvcMount) + deployment.AppendPVCVolumeIfMissing(pvcVolumeName, instance.PVCName()) + } +} diff --git a/pkg/offering/base/ca/override/deployment_test.go b/pkg/offering/base/ca/override/deployment_test.go new file mode 100644 index 00000000..28653c3e --- /dev/null +++ b/pkg/offering/base/ca/override/deployment_test.go @@ -0,0 +1,890 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + "context" + "encoding/json" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/ca/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + dep "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/deployment" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/ca/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" +) + +var _ = Describe("Deployment Overrides", func() { + var ( + overrider *override.Override + instance *current.IBPCA + deployment *appsv1.Deployment + mockKubeClient *mocks.Client + ) + + BeforeEach(func() { + var err error + + mockKubeClient = &mocks.Client{} + mockKubeClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj client.Object) error { + switch obj.(type) { + case *corev1.ConfigMap: + hsmConfig := &config.HSMConfig{ + Type: "hsm", + Version: "v1", + MountPaths: []config.MountPath{ + config.MountPath{ + Name: "hsmcrypto", + Secret: "hsmcrypto", + MountPath: "/hsm", + Paths: []config.Path{ + { + Key: "cafile.pem", + Path: "cafile.pem", + }, + { + Key: "cert.pem", + Path: "cert.pem", + }, + { + Key: "key.pem", + Path: "key.pem", + }, + { + Key: "server.pem", + Path: "server.pem", + }, + }, + }, + config.MountPath{ + Name: "hsmconfig", + Secret: "hsmcrypto", + MountPath: "/etc/Chrystoki.conf", + SubPath: "Chrystoki.conf", + Paths: []config.Path{ + { + Key: "Chrystoki.conf", + Path: "Chrystoki.conf", + }, + }, + }, + }, + Envs: []corev1.EnvVar{ + { + Name: "env1", + Value: "env1value", + }, + }, + } + + configBytes, err := yaml.Marshal(hsmConfig) + if err != nil { + return err + } + o := obj.(*corev1.ConfigMap) + o.Data = map[string]string{"ibp-hsm-config.yaml": string(configBytes)} + } + return nil + } + + overrider = &override.Override{ + Client: mockKubeClient, + } + deployment, err = util.GetDeploymentFromFile("../../../../../definitions/ca/deployment.yaml") + Expect(err).NotTo(HaveOccurred()) + deployment.Spec.Template.Spec.InitContainers[0].Image = "fake-init-image:1234" + deployment.Spec.Template.Spec.Containers[0].Image = "fake-ca-image:1234" + + instance = ¤t.IBPCA{ + ObjectMeta: metav1.ObjectMeta{ + Name: "override1", + Namespace: "namespace1", + }, + Spec: current.IBPCASpec{ + License: current.License{ + Accept: true, + }, + Storage: ¤t.CAStorages{}, + Service: ¤t.Service{}, + Images: ¤t.CAImages{ + CAImage: "ca-image", + CAInitImage: "init-image", + }, + Arch: []string{"test-arch"}, + Zone: "dal", + Region: "us-south", + ImagePullSecrets: []string{"pullsecret"}, + Resources: ¤t.CAResources{ + CA: &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("0.6m"), + corev1.ResourceMemory: resource.MustParse("0.4m"), + corev1.ResourceEphemeralStorage: resource.MustParse("100M"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: 
resource.MustParse("0.7m"), + corev1.ResourceMemory: resource.MustParse("0.5m"), + corev1.ResourceEphemeralStorage: resource.MustParse("1G"), + }, + }, + }, + }, + } + }) + + When("creating a new deployment", func() { + It("returns an error if license is not accepted", func() { + instance.Spec.License.Accept = false + err := overrider.Deployment(instance, deployment, resources.Create) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("user must accept license before continuing")) + }) + + It("overrides values in deployment based on CA's instance spec", func() { + err := overrider.Deployment(instance, deployment, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + By("setting service account name to be name of CA instance", func() { + Expect(deployment.Spec.Template.Spec.ServiceAccountName).To(Equal(instance.Name)) + }) + + By("setting image pull secret", func() { + Expect(deployment.Spec.Template.Spec.ImagePullSecrets[0].Name).To(Equal(instance.Spec.ImagePullSecrets[0])) + }) + + By("setting resources", func() { + updated, err := util.GetResourcePatch(&corev1.ResourceRequirements{}, instance.Spec.Resources.CA) + Expect(err).NotTo(HaveOccurred()) + Expect(deployment.Spec.Template.Spec.Containers[0].Resources).To(Equal(*updated)) + }) + + By("setting affinity", func() { + affinity := corev1.Affinity{ + NodeAffinity: &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + corev1.NodeSelectorTerm{ + MatchExpressions: []corev1.NodeSelectorRequirement{ + corev1.NodeSelectorRequirement{ + Key: "kubernetes.io/arch", + Operator: corev1.NodeSelectorOpIn, + Values: instance.Spec.Arch, + }, + corev1.NodeSelectorRequirement{ + Key: "topology.kubernetes.io/zone", + Operator: corev1.NodeSelectorOpIn, + Values: []string{instance.Spec.Zone}, + }, + corev1.NodeSelectorRequirement{ + Key: "topology.kubernetes.io/region", + Operator: corev1.NodeSelectorOpIn, + Values: []string{instance.Spec.Region}, + }, + }, + }, + corev1.NodeSelectorTerm{ + MatchExpressions: []corev1.NodeSelectorRequirement{ + corev1.NodeSelectorRequirement{ + Key: "failure-domain.beta.kubernetes.io/zone", + Operator: corev1.NodeSelectorOpIn, + Values: []string{instance.Spec.Zone}, + }, + corev1.NodeSelectorRequirement{ + Key: "failure-domain.beta.kubernetes.io/region", + Operator: corev1.NodeSelectorOpIn, + Values: []string{instance.Spec.Region}, + }, + }, + }, + }, + }, + }, + } + affinity.PodAntiAffinity = &corev1.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{ + corev1.WeightedPodAffinityTerm{ + Weight: 100, + PodAffinityTerm: corev1.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + metav1.LabelSelectorRequirement{ + Key: "app", + Operator: metav1.LabelSelectorOpIn, + Values: []string{instance.Name}, + }, + }, + }, + TopologyKey: "topology.kubernetes.io/zone", + }, + }, + corev1.WeightedPodAffinityTerm{ + Weight: 100, + PodAffinityTerm: corev1.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + metav1.LabelSelectorRequirement{ + Key: "app", + Operator: metav1.LabelSelectorOpIn, + Values: []string{instance.Name}, + }, + }, + }, + TopologyKey: "failure-domain.beta.kubernetes.io/zone", + }, + }, + }, + } + Expect(*deployment.Spec.Template.Spec.Affinity).To(Equal(affinity)) + }) + + Context("volumes", func() { + + By("creating a ca crypto volume", func() { + 
volume := corev1.Volume{ + Name: "ca-crypto", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: instance.Name + "-ca-crypto", + }, + }, + } + Expect(deployment.Spec.Template.Spec.Volumes).To(ContainElement(volume)) + }) + + By("creating a tlsca crypto volume", func() { + volume := corev1.Volume{ + Name: "tlsca-crypto", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: instance.Name + "-tlsca-crypto", + }, + }, + } + Expect(deployment.Spec.Template.Spec.Volumes).To(ContainElement(volume)) + }) + + By("creating a ca config volume", func() { + volume := corev1.Volume{ + Name: "ca-config", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: instance.Name + "-ca-config", + }, + }, + }, + } + Expect(deployment.Spec.Template.Spec.Volumes).To(ContainElement(volume)) + }) + + By("creating a tlsca config volume", func() { + volume := corev1.Volume{ + Name: "tlsca-config", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: instance.Name + "-tlsca-config", + }, + }, + }, + } + Expect(deployment.Spec.Template.Spec.Volumes).To(ContainElement(volume)) + }) + }) + }) + + Context("images", func() { + When("no tag is passed", func() { + It("uses 'latest' for image tags", func() { + Expect(deployment.Spec.Template.Spec.InitContainers[0].Image).To(Equal("fake-init-image:1234")) + Expect(deployment.Spec.Template.Spec.Containers[0].Image).To(Equal("fake-ca-image:1234")) + + err := overrider.Deployment(instance, deployment, resources.Create) + Expect(err).NotTo(HaveOccurred()) + Expect(deployment.Spec.Template.Spec.InitContainers[0].Image).To(Equal("init-image:latest")) + Expect(deployment.Spec.Template.Spec.Containers[0].Image).To(Equal("ca-image:latest")) + }) + }) + + When("tag is passed", func() { + It("uses the passed in tag for image tags", func() { + instance.Spec.Images.CAInitTag = "2.0.0" + instance.Spec.Images.CATag = "1.0.0" + + err := overrider.Deployment(instance, deployment, resources.Create) + Expect(err).NotTo(HaveOccurred()) + Expect(deployment.Spec.Template.Spec.InitContainers[0].Image).To(Equal("init-image:2.0.0")) + Expect(deployment.Spec.Template.Spec.Containers[0].Image).To(Equal("ca-image:1.0.0")) + }) + }) + }) + + Context("database overrides", func() { + When("not using postgres", func() { + It("performs overrides", func() { + err := overrider.Deployment(instance, deployment, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + By("creating a PVC volume", func() { + volume := corev1.Volume{ + Name: "fabric-ca", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: instance.Name + "-pvc", + }, + }, + } + Expect(deployment.Spec.Template.Spec.Volumes).To(ContainElement(volume)) + }) + + By("creating a volume mount for both init and ca containers", func() { + volumeMount := corev1.VolumeMount{ + Name: "fabric-ca", + MountPath: "/data", + SubPath: "fabric-ca-server", + } + Expect(deployment.Spec.Template.Spec.InitContainers[0].VolumeMounts).To(ContainElement(volumeMount)) + Expect(deployment.Spec.Template.Spec.Containers[0].VolumeMounts).To(ContainElement(volumeMount)) + }) + }) + }) + + When("using postgres", func() { + BeforeEach(func() { + instance.Spec.ConfigOverride = ¤t.ConfigOverride{ + CA: &runtime.RawExtension{}, + TLSCA: &runtime.RawExtension{}, + } + + 
caConfig := &v1.ServerConfig{ + CAConfig: v1.CAConfig{ + DB: &v1.CAConfigDB{ + Type: "postgres", + }, + }, + } + + caConfigJson, err := util.ConvertToJsonMessage(caConfig) + Expect(err).NotTo(HaveOccurred()) + instance.Spec.ConfigOverride.CA = &runtime.RawExtension{Raw: *caConfigJson} + }) + + It("performs overrides", func() { + err := overrider.Deployment(instance, deployment, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + By("creating a volume mount for both init and ca containers", func() { + volumeMount := corev1.VolumeMount{ + Name: "shared", + MountPath: "/data", + } + Expect(deployment.Spec.Template.Spec.InitContainers[0].VolumeMounts).To(ContainElement(volumeMount)) + Expect(deployment.Spec.Template.Spec.Containers[0].VolumeMounts).To(ContainElement(volumeMount)) + }) + + By("setting strategy to rolling update", func() { + Expect(deployment.Spec.Strategy.Type).To(Equal(appsv1.RollingUpdateDeploymentStrategyType)) + }) + }) + }) + }) + + Context("replicas is greater than 1", func() { + BeforeEach(func() { + replicas := int32(2) + instance.Spec.Replicas = &replicas + }) + + It("returns an error if db is not set in CA override", func() { + err := overrider.Deployment(instance, deployment, resources.Create) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("Failed to provide override configuration to support greater than 1 replicas")) + + }) + + It("returns an error if db is set to not equal postgres in CA override", func() { + ca := &v1.ServerConfig{ + CAConfig: v1.CAConfig{ + DB: &v1.CAConfigDB{ + Type: "mysql", + }, + }, + } + caJson, err := util.ConvertToJsonMessage(ca) + Expect(err).NotTo(HaveOccurred()) + + instance.Spec.ConfigOverride = ¤t.ConfigOverride{ + CA: &runtime.RawExtension{Raw: *caJson}, + } + err = overrider.Deployment(instance, deployment, resources.Create) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("DB Type in CA config override should be `postgres` to allow replicas > 1")) + }) + + It("returns an error if datasource is empty in CA override", func() { + ca := &v1.ServerConfig{ + CAConfig: v1.CAConfig{ + DB: &v1.CAConfigDB{ + Type: "postgres", + }, + }, + } + caJson, err := util.ConvertToJsonMessage(ca) + Expect(err).NotTo(HaveOccurred()) + + instance.Spec.ConfigOverride = ¤t.ConfigOverride{ + CA: &runtime.RawExtension{Raw: *caJson}, + TLSCA: &runtime.RawExtension{Raw: *caJson}, + } + + err = overrider.Deployment(instance, deployment, resources.Create) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("Datasource in CA config override should not be empty to allow replicas > 1")) + }) + + It("returns an error if db is not set in TLSCA override", func() { + ca := &v1.ServerConfig{ + CAConfig: v1.CAConfig{ + DB: &v1.CAConfigDB{ + Type: "postgres", + Datasource: "datasource", + }, + }, + } + caJson, err := util.ConvertToJsonMessage(ca) + Expect(err).NotTo(HaveOccurred()) + + instance.Spec.ConfigOverride = ¤t.ConfigOverride{ + CA: &runtime.RawExtension{Raw: *caJson}, + } + + err = overrider.Deployment(instance, deployment, resources.Create) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("Failed to provide database configuration for TLSCA to support greater than 1 replicas")) + }) + + It("returns an error if db is set to not equal postgres in TLSCA override", func() { + ca := &v1.ServerConfig{ + CAConfig: v1.CAConfig{ + DB: &v1.CAConfigDB{ + Type: "postgres", + Datasource: "fake", + }, + }, + } + caJson, err := util.ConvertToJsonMessage(ca) + Expect(err).NotTo(HaveOccurred()) + + tlsca := 
&v1.ServerConfig{ + CAConfig: v1.CAConfig{ + DB: &v1.CAConfigDB{ + Type: "mysql", + }, + }, + } + tlscaJson, err := util.ConvertToJsonMessage(tlsca) + Expect(err).NotTo(HaveOccurred()) + + instance.Spec.ConfigOverride = ¤t.ConfigOverride{ + CA: &runtime.RawExtension{Raw: *caJson}, + TLSCA: &runtime.RawExtension{Raw: *tlscaJson}, + } + err = overrider.Deployment(instance, deployment, resources.Create) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("DB Type in TLSCA config override should be `postgres` to allow replicas > 1")) + }) + + It("returns an error if datasource is empty in TLSCA override", func() { + ca := &v1.ServerConfig{ + CAConfig: v1.CAConfig{ + DB: &v1.CAConfigDB{ + Type: "postgres", + Datasource: "fake", + }, + }, + } + caJson, err := util.ConvertToJsonMessage(ca) + Expect(err).NotTo(HaveOccurred()) + + tlsca := &v1.ServerConfig{ + CAConfig: v1.CAConfig{ + DB: &v1.CAConfigDB{ + Type: "postgres", + }, + }, + } + tlscaJson, err := util.ConvertToJsonMessage(tlsca) + Expect(err).NotTo(HaveOccurred()) + + instance.Spec.ConfigOverride = ¤t.ConfigOverride{ + CA: &runtime.RawExtension{Raw: *caJson}, + TLSCA: &runtime.RawExtension{Raw: *tlscaJson}, + } + + err = overrider.Deployment(instance, deployment, resources.Create) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("Datasource in TLSCA config override should not be empty to allow replicas > 1")) + }) + + It("returns no error if db is set to postgres", func() { + ca := &v1.ServerConfig{ + CAConfig: v1.CAConfig{ + DB: &v1.CAConfigDB{ + Type: "postgres", + Datasource: "fake", + }, + }, + } + caBytes, err := json.Marshal(ca) + Expect(err).NotTo(HaveOccurred()) + caJson := json.RawMessage(caBytes) + + tlsca := &v1.ServerConfig{ + CAConfig: v1.CAConfig{ + DB: &v1.CAConfigDB{ + Type: "postgres", + Datasource: "fake", + }, + }, + } + tlscaBytes, err := json.Marshal(tlsca) + Expect(err).NotTo(HaveOccurred()) + tlscaJson := json.RawMessage(tlscaBytes) + + instance.Spec.ConfigOverride = ¤t.ConfigOverride{ + CA: &runtime.RawExtension{Raw: caJson}, + TLSCA: &runtime.RawExtension{Raw: tlscaJson}, + } + + err = overrider.Deployment(instance, deployment, resources.Create) + Expect(err).NotTo(HaveOccurred()) + Expect(deployment.Spec.Strategy.Type).To(Equal(appsv1.RollingUpdateDeploymentStrategyType)) + }) + }) + + Context("Replicas is nil", func() { + It("returns success", func() { + instance.Spec.Replicas = nil + err := overrider.Deployment(instance, deployment, resources.Create) + Expect(err).NotTo(HaveOccurred()) + }) + }) + }) + + When("updating a deployment", func() { + Context("images", func() { + var image *current.CAImages + + BeforeEach(func() { + image = ¤t.CAImages{ + CAImage: "ca-image", + CAInitImage: "init-image", + } + instance.Spec.Images = image + }) + + When("no tag is passed", func() { + It("uses 'latest' for image tags", func() { + Expect(deployment.Spec.Template.Spec.InitContainers[0].Image).To(Equal("fake-init-image:1234")) + Expect(deployment.Spec.Template.Spec.Containers[0].Image).To(Equal("fake-ca-image:1234")) + + err := overrider.Deployment(instance, deployment, resources.Create) + Expect(err).NotTo(HaveOccurred()) + Expect(deployment.Spec.Template.Spec.InitContainers[0].Image).To(Equal("init-image:latest")) + Expect(deployment.Spec.Template.Spec.Containers[0].Image).To(Equal("ca-image:latest")) + }) + }) + + When("tag is passed", func() { + It("uses the passed in tag for image tags", func() { + image.CATag = "1.0.0" + image.CAInitTag = "2.0.0" + + err := overrider.Deployment(instance, 
deployment, resources.Update) + Expect(err).NotTo(HaveOccurred()) + Expect(deployment.Spec.Template.Spec.InitContainers[0].Image).To(Equal("init-image:2.0.0")) + Expect(deployment.Spec.Template.Spec.Containers[0].Image).To(Equal("ca-image:1.0.0")) + }) + }) + }) + }) + + Context("replicas is greater than 1", func() { + BeforeEach(func() { + replicas := int32(2) + instance.Spec.Replicas = &replicas + }) + + It("returns an error if db is not set in CA override", func() { + err := overrider.Deployment(instance, deployment, resources.Update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("Failed to provide override configuration to support greater than 1 replicas")) + + }) + + It("returns an error if db is set to not equal postgres in CA override", func() { + ca := &v1.ServerConfig{ + CAConfig: v1.CAConfig{ + DB: &v1.CAConfigDB{ + Type: "mysql", + }, + }, + } + caBytes, err := json.Marshal(ca) + Expect(err).NotTo(HaveOccurred()) + caJson := json.RawMessage(caBytes) + + instance.Spec.ConfigOverride = ¤t.ConfigOverride{ + CA: &runtime.RawExtension{Raw: caJson}, + } + err = overrider.Deployment(instance, deployment, resources.Update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("DB Type in CA config override should be `postgres` to allow replicas > 1")) + }) + + It("returns an error if datasource is empty in CA override", func() { + ca := &v1.ServerConfig{ + CAConfig: v1.CAConfig{ + DB: &v1.CAConfigDB{ + Type: "postgres", + }, + }, + } + caBytes, err := json.Marshal(ca) + Expect(err).NotTo(HaveOccurred()) + + tlsca := &v1.ServerConfig{ + CAConfig: v1.CAConfig{ + DB: &v1.CAConfigDB{ + Type: "postgres", + }, + }, + } + tlscaBytes, err := json.Marshal(tlsca) + Expect(err).NotTo(HaveOccurred()) + + instance.Spec.ConfigOverride = ¤t.ConfigOverride{ + CA: &runtime.RawExtension{Raw: caBytes}, + TLSCA: &runtime.RawExtension{Raw: tlscaBytes}, + } + + err = overrider.Deployment(instance, deployment, resources.Update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("Datasource in CA config override should not be empty to allow replicas > 1")) + }) + + It("returns an error if db is not set in TLSCA override", func() { + ca := &v1.ServerConfig{ + CAConfig: v1.CAConfig{ + DB: &v1.CAConfigDB{ + Type: "postgres", + Datasource: "datasource", + }, + }, + } + caBytes, err := json.Marshal(ca) + Expect(err).NotTo(HaveOccurred()) + + instance.Spec.ConfigOverride = ¤t.ConfigOverride{ + CA: &runtime.RawExtension{Raw: caBytes}, + } + + err = overrider.Deployment(instance, deployment, resources.Update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("Failed to provide database configuration for TLSCA to support greater than 1 replicas")) + }) + + It("returns an error if db is set to not equal postgres in TLSCA override", func() { + ca := &v1.ServerConfig{ + CAConfig: v1.CAConfig{ + DB: &v1.CAConfigDB{ + Type: "postgres", + Datasource: "fake", + }, + }, + } + caBytes, err := json.Marshal(ca) + Expect(err).NotTo(HaveOccurred()) + + tlsca := &v1.ServerConfig{ + CAConfig: v1.CAConfig{ + DB: &v1.CAConfigDB{ + Type: "mysql", + }, + }, + } + tlscaBytes, err := json.Marshal(tlsca) + Expect(err).NotTo(HaveOccurred()) + + instance.Spec.ConfigOverride = ¤t.ConfigOverride{ + CA: &runtime.RawExtension{Raw: caBytes}, + TLSCA: &runtime.RawExtension{Raw: tlscaBytes}, + } + + err = overrider.Deployment(instance, deployment, resources.Update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("DB Type in TLSCA config override should be `postgres` to allow 
replicas > 1")) + }) + + It("returns an error if datasource is empty in TLSCA override", func() { + ca := &v1.ServerConfig{ + CAConfig: v1.CAConfig{ + DB: &v1.CAConfigDB{ + Type: "postgres", + Datasource: "fake", + }, + }, + } + caBytes, err := json.Marshal(ca) + Expect(err).NotTo(HaveOccurred()) + + tlsca := &v1.ServerConfig{ + CAConfig: v1.CAConfig{ + DB: &v1.CAConfigDB{ + Type: "postgres", + }, + }, + } + tlscaBytes, err := json.Marshal(tlsca) + Expect(err).NotTo(HaveOccurred()) + + instance.Spec.ConfigOverride = ¤t.ConfigOverride{ + CA: &runtime.RawExtension{Raw: caBytes}, + TLSCA: &runtime.RawExtension{Raw: tlscaBytes}, + } + + err = overrider.Deployment(instance, deployment, resources.Update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("Datasource in TLSCA config override should not be empty to allow replicas > 1")) + }) + + It("returns no error if db is set to postgres", func() { + ca := &v1.ServerConfig{ + CAConfig: v1.CAConfig{ + DB: &v1.CAConfigDB{ + Type: "postgres", + Datasource: "fake", + }, + }, + } + caJson, err := util.ConvertToJsonMessage(ca) + Expect(err).NotTo(HaveOccurred()) + + tlsca := &v1.ServerConfig{ + CAConfig: v1.CAConfig{ + DB: &v1.CAConfigDB{ + Type: "postgres", + Datasource: "fake", + }, + }, + } + tlscaJson, err := util.ConvertToJsonMessage(tlsca) + Expect(err).NotTo(HaveOccurred()) + + instance.Spec.ConfigOverride = ¤t.ConfigOverride{ + CA: &runtime.RawExtension{Raw: *caJson}, + TLSCA: &runtime.RawExtension{Raw: *tlscaJson}, + } + + err = overrider.Deployment(instance, deployment, resources.Update) + Expect(err).NotTo(HaveOccurred()) + Expect(deployment.Spec.Strategy.Type).To(Equal(appsv1.RollingUpdateDeploymentStrategyType)) + }) + }) + + Context("HSM", func() { + BeforeEach(func() { + ca := &v1.ServerConfig{ + CAConfig: v1.CAConfig{ + CSP: &v1.BCCSP{ + ProviderName: "PKCS11", + PKCS11: &v1.PKCS11Opts{ + Label: "partition1", + Pin: "B6T9Q7mGNG", + }, + }, + }, + } + caJson, err := util.ConvertToJsonMessage(ca) + Expect(err).NotTo(HaveOccurred()) + + instance.Spec.ConfigOverride = ¤t.ConfigOverride{ + CA: &runtime.RawExtension{Raw: *caJson}, + } + }) + + It("sets proxy env on ca container", func() { + instance.Spec.HSM = ¤t.HSM{PKCS11Endpoint: "1.2.3.4"} + err := overrider.Deployment(instance, deployment, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + d := dep.New(deployment) + Expect(d.MustGetContainer(override.CA).Env).To(ContainElement(corev1.EnvVar{ + Name: "PKCS11_PROXY_SOCKET", + Value: "1.2.3.4", + })) + }) + + It("configures deployment to use HSM init image", func() { + err := overrider.Deployment(instance, deployment, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + d := dep.New(deployment) + By("setting volume mounts", func() { + Expect(d.MustGetContainer(override.CA).VolumeMounts).To(ContainElement(corev1.VolumeMount{ + Name: "shared", + MountPath: "/hsm/lib", + SubPath: "hsm", + })) + + Expect(d.MustGetContainer(override.CA).VolumeMounts).To(ContainElement(corev1.VolumeMount{ + Name: "hsmconfig", + MountPath: "/etc/Chrystoki.conf", + SubPath: "Chrystoki.conf", + })) + }) + + By("setting env vars", func() { + Expect(d.MustGetContainer(override.CA).Env).To(ContainElement(corev1.EnvVar{ + Name: "env1", + Value: "env1value", + })) + }) + + By("creating HSM init container", func() { + Expect(d.ContainerExists("hsm-client")).To(Equal(true)) + }) + }) + }) +}) diff --git a/pkg/offering/base/ca/override/override.go b/pkg/offering/base/ca/override/override.go new file mode 100644 index 00000000..a91f5a5f --- 
/dev/null +++ b/pkg/offering/base/ca/override/override.go @@ -0,0 +1,167 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override + +import ( + "encoding/json" + "strings" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/ca/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type Override struct { + Client controllerclient.Client +} + +func (o *Override) IsPostgres(instance *current.IBPCA) bool { + if instance.Spec.ConfigOverride != nil { + if instance.Spec.ConfigOverride.CA != nil { + caOverrides := &v1.ServerConfig{} + err := json.Unmarshal(instance.Spec.ConfigOverride.CA.Raw, caOverrides) + if err != nil { + return false + } + + if caOverrides.DB != nil { + if strings.ToLower(caOverrides.DB.Type) == "postgres" { + return true + } + } + } + + if instance.Spec.ConfigOverride.TLSCA != nil { + tlscaOverrides := &v1.ServerConfig{} + err := json.Unmarshal(instance.Spec.ConfigOverride.TLSCA.Raw, tlscaOverrides) + if err != nil { + return false + } + + if tlscaOverrides.DB != nil { + if strings.ToLower(tlscaOverrides.DB.Type) == "postgres" { + return true + } + } + } + } + + return false +} + +func (o *Override) GetAffinity(instance *current.IBPCA) *corev1.Affinity { + affinity := &corev1.Affinity{} + + affinity.NodeAffinity = o.GetNodeAffinity(instance) + affinity.PodAntiAffinity = o.GetPodAntiAffinity(instance) + + return affinity +} + +func (o *Override) GetNodeAffinity(instance *current.IBPCA) *corev1.NodeAffinity { + arch := instance.Spec.Arch + zone := instance.Spec.Zone + region := instance.Spec.Region + + nodeSelectorTerms := []corev1.NodeSelectorTerm{ + corev1.NodeSelectorTerm{ + MatchExpressions: []corev1.NodeSelectorRequirement{}, + }, + corev1.NodeSelectorTerm{ + MatchExpressions: []corev1.NodeSelectorRequirement{}, + }, + } + common.AddArchSelector(arch, &nodeSelectorTerms) + + if !o.IsPostgres(instance) { + common.AddZoneSelector(zone, &nodeSelectorTerms) + common.AddRegionSelector(region, &nodeSelectorTerms) + } + + if len(nodeSelectorTerms[0].MatchExpressions) != 0 { + return &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: nodeSelectorTerms, + }, + } + } + + return nil +} + +func (o *Override) GetPodAntiAffinity(instance *current.IBPCA) *corev1.PodAntiAffinity { + antiaffinity := &corev1.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{ + corev1.WeightedPodAffinityTerm{ + Weight: 100, + PodAffinityTerm: corev1.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + metav1.LabelSelectorRequirement{ + Key: "app", + 
Operator: metav1.LabelSelectorOpIn, + Values: []string{instance.GetName()}, + }, + }, + }, + TopologyKey: "topology.kubernetes.io/zone", + }, + }, + corev1.WeightedPodAffinityTerm{ + Weight: 100, + PodAffinityTerm: corev1.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + metav1.LabelSelectorRequirement{ + Key: "app", + Operator: metav1.LabelSelectorOpIn, + Values: []string{instance.GetName()}, + }, + }, + }, + TopologyKey: "failure-domain.beta.kubernetes.io/zone", + }, + }, + }, + } + + if o.IsPostgres(instance) { + term := corev1.WeightedPodAffinityTerm{ + Weight: 100, + PodAffinityTerm: corev1.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + metav1.LabelSelectorRequirement{ + Key: "app", + Operator: metav1.LabelSelectorOpIn, + Values: []string{instance.GetName()}, + }, + }, + }, + TopologyKey: "kubernetes.io/hostname", + }, + } + antiaffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(antiaffinity.PreferredDuringSchedulingIgnoredDuringExecution, term) + } + + return antiaffinity +} diff --git a/pkg/offering/base/ca/override/override_suite_test.go b/pkg/offering/base/ca/override/override_suite_test.go new file mode 100644 index 00000000..e44a1014 --- /dev/null +++ b/pkg/offering/base/ca/override/override_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestCa(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Ca Suite") +} diff --git a/pkg/offering/base/ca/override/override_test.go b/pkg/offering/base/ca/override/override_test.go new file mode 100644 index 00000000..3d02d44e --- /dev/null +++ b/pkg/offering/base/ca/override/override_test.go @@ -0,0 +1,119 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + "encoding/json" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/runtime" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/ca/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/k8s/ca/override" +) + +var _ = Describe("Base CA Overrides", func() { + var ( + overrider *override.Override + instance *current.IBPCA + ) + + BeforeEach(func() { + overrider = &override.Override{} + }) + + Context("Affnity", func() { + BeforeEach(func() { + instance = ¤t.IBPCA{ + Spec: current.IBPCASpec{ + Arch: []string{"test-arch"}, + Zone: "dal", + Region: "us-south", + }, + } + instance.Name = "ca1" + }) + + It("returns an proper affinity when arch is passed", func() { + instance.Spec.Arch = []string{"test-arch"} + a := overrider.GetAffinity(instance) + + By("setting node affinity", func() { + Expect(a.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0].Values).To(Equal([]string{"test-arch"})) + Expect(a.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[1].Values).To(Equal([]string{"dal"})) + Expect(a.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[2].Values).To(Equal([]string{"us-south"})) + }) + + By("setting pod anti affinity", func() { + Expect(a.PodAntiAffinity).NotTo(BeNil()) + Expect(a.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0].PodAffinityTerm.LabelSelector.MatchExpressions[0].Values).To(Equal([]string{"ca1"})) + }) + }) + + It("returns a proper affinity when no arch is passed", func() { + instance.Spec.Arch = []string{} + a := overrider.GetAffinity(instance) + Expect(a.NodeAffinity).NotTo(BeNil()) + + By("setting node affinity", func() { + Expect(a.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0].Values).To(Equal([]string{"dal"})) + Expect(a.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[1].Values).To(Equal([]string{"us-south"})) + }) + + By("setting pod anti affinity", func() { + Expect(a.PodAntiAffinity).NotTo(BeNil()) + Expect(len(a.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution)).To(Equal(2)) + Expect(a.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0].PodAffinityTerm.LabelSelector.MatchExpressions[0].Values).To(Equal([]string{"ca1"})) + }) + }) + + It("returns a proper affinity for postgres CA", func() { + caOverrides := &v1.ServerConfig{ + CAConfig: v1.CAConfig{ + DB: &v1.CAConfigDB{ + Type: "postgres", + }, + }, + } + bytes, err := json.Marshal(caOverrides) + Expect(err).NotTo(HaveOccurred()) + rawMessage := json.RawMessage(bytes) + instance.Spec.ConfigOverride = ¤t.ConfigOverride{ + CA: &runtime.RawExtension{Raw: rawMessage}, + } + + a := overrider.GetAffinity(instance) + + By("not setting zone or region in node affinity", func() { + Expect(a.NodeAffinity).NotTo(BeNil()) + Expect(len(a.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions)).To(Equal(1)) + Expect(a.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0].Values).To(Equal([]string{"test-arch"})) + }) + + By("setting pod anti affinity with hostname topology key", func() { + Expect(a.PodAntiAffinity).NotTo(BeNil()) + Expect(len(a.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution)).To(Equal(3)) + 
Expect(a.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0].PodAffinityTerm.LabelSelector.MatchExpressions[0].Values).To(Equal([]string{"ca1"})) + Expect(a.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution[2].PodAffinityTerm.TopologyKey).To(Equal("kubernetes.io/hostname")) + }) + }) + }) +}) diff --git a/pkg/offering/base/ca/override/overridecm.go b/pkg/offering/base/ca/override/overridecm.go new file mode 100644 index 00000000..e03bb6f9 --- /dev/null +++ b/pkg/offering/base/ca/override/overridecm.go @@ -0,0 +1,64 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override + +import ( + "encoding/json" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (o *Override) OverrideCM(object v1.Object, cm *corev1.ConfigMap, action resources.Action, options map[string]interface{}) error { + instance := object.(*current.IBPCA) + switch action { + case resources.Create: + return o.CreateOverrideCM(instance, cm) + case resources.Update: + return o.UpdateOverrideCM(instance, cm) + } + + return nil +} + +func (o *Override) CreateOverrideCM(instance *current.IBPCA, cm *corev1.ConfigMap) error { + if instance.Spec.ConfigOverride != nil && instance.Spec.ConfigOverride.CA != nil { + bytes, err := json.Marshal(instance.Spec.ConfigOverride.CA) + if err != nil { + return err + } + cm.Data["ca-config.yaml"] = string(bytes) + } + + if instance.Spec.ConfigOverride != nil && instance.Spec.ConfigOverride.TLSCA != nil { + bytes, err := json.Marshal(instance.Spec.ConfigOverride.TLSCA) + if err != nil { + return err + } + cm.Data["tlsca-config.yaml"] = string(bytes) + } + + return nil +} + +func (o *Override) UpdateOverrideCM(instance *current.IBPCA, cm *corev1.ConfigMap) error { + return nil +} diff --git a/pkg/offering/base/ca/override/pvc.go b/pkg/offering/base/ca/override/pvc.go new file mode 100644 index 00000000..7ae508ac --- /dev/null +++ b/pkg/offering/base/ca/override/pvc.go @@ -0,0 +1,80 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package override + +import ( + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (o *Override) PVC(object v1.Object, pvc *corev1.PersistentVolumeClaim, action resources.Action) error { + instance := object.(*current.IBPCA) + switch action { + case resources.Create: + return o.CreatePVC(instance, pvc) + case resources.Update: + return o.UpdatePVC(instance, pvc) + } + + return nil +} + +func (o *Override) CreatePVC(instance *current.IBPCA, pvc *corev1.PersistentVolumeClaim) error { + storage := instance.Spec.Storage + if storage != nil { + caStorage := storage.CA + if caStorage != nil { + if caStorage.Class != "" { + pvc.Spec.StorageClassName = &caStorage.Class + } + if caStorage.Size != "" { + quantity, err := resource.ParseQuantity(caStorage.Size) + if err != nil { + return err + } + resourceMap := pvc.Spec.Resources.Requests + if resourceMap == nil { + resourceMap = corev1.ResourceList{} + } + resourceMap[corev1.ResourceStorage] = quantity + pvc.Spec.Resources.Requests = resourceMap + } + } + } + + if pvc.ObjectMeta.Labels == nil { + pvc.ObjectMeta.Labels = map[string]string{} + } + if instance.Spec.Zone != "" { + pvc.ObjectMeta.Labels["zone"] = instance.Spec.Zone + } + + if instance.Spec.Region != "" { + pvc.ObjectMeta.Labels["region"] = instance.Spec.Region + } + + return nil +} + +func (o *Override) UpdatePVC(instance *current.IBPCA, pvc *corev1.PersistentVolumeClaim) error { + return nil +} diff --git a/pkg/offering/base/ca/override/pvc_test.go b/pkg/offering/base/ca/override/pvc_test.go new file mode 100644 index 00000000..1db20cde --- /dev/null +++ b/pkg/offering/base/ca/override/pvc_test.go @@ -0,0 +1,92 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/k8s/ca/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" +) + +var _ = Describe("PVC Overrides", func() { + var ( + overrider *override.Override + instance *current.IBPCA + pvc *corev1.PersistentVolumeClaim + ) + + BeforeEach(func() { + var err error + + overrider = &override.Override{} + pvc, err = util.GetPVCFromFile("../../../../../definitions/ca/pvc.yaml") + Expect(err).NotTo(HaveOccurred()) + + instance = ¤t.IBPCA{ + ObjectMeta: metav1.ObjectMeta{ + Name: "override1", + Namespace: "namespace1", + }, + Spec: current.IBPCASpec{ + Storage: ¤t.CAStorages{ + CA: ¤t.StorageSpec{ + Size: "200M", + Class: "not-manual", + }, + }, + Region: "fakeregion", + Zone: "fakezone", + }, + } + }) + + Context("creating a new pvc", func() { + It("returns an error if improperly formatted value for size is used", func() { + instance.Spec.Storage.CA.Size = "123b" + err := overrider.PVC(instance, pvc, resources.Create) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("quantities must match the regular expression")) + }) + + It("overrides values in pvc, based on CA's instance spec", func() { + Expect(pvc.Spec.StorageClassName).To(BeNil()) + err := overrider.PVC(instance, pvc, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + By("setting the labels for zone and region", func() { + Expect(pvc.ObjectMeta.Labels["region"]).To(Equal("fakeregion")) + Expect(pvc.ObjectMeta.Labels["zone"]).To(Equal("fakezone")) + }) + + By("setting the storage class name and size", func() { + Expect(*pvc.Spec.StorageClassName).To(Equal("not-manual")) + q := pvc.Spec.Resources.Requests[corev1.ResourceStorage] + Expect(q.String()).To(Equal("200M")) + }) + + }) + }) +}) diff --git a/pkg/offering/base/ca/override/role.go b/pkg/offering/base/ca/override/role.go new file mode 100644 index 00000000..9ba229d0 --- /dev/null +++ b/pkg/offering/base/ca/override/role.go @@ -0,0 +1,46 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package override + +import ( + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + rbacv1 "k8s.io/api/rbac/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (o *Override) Role(object v1.Object, role *rbacv1.Role, action resources.Action) error { + instance := object.(*current.IBPCA) + switch action { + case resources.Create: + return o.CreateRole(instance, role) + case resources.Update: + return o.UpdateRole(instance, role) + } + + return nil +} + +func (o *Override) CreateRole(instance *current.IBPCA, rb *rbacv1.Role) error { + return nil +} + +func (o *Override) UpdateRole(instance *current.IBPCA, rb *rbacv1.Role) error { + return nil +} diff --git a/pkg/offering/base/ca/override/rolebinding.go b/pkg/offering/base/ca/override/rolebinding.go new file mode 100644 index 00000000..37c29460 --- /dev/null +++ b/pkg/offering/base/ca/override/rolebinding.go @@ -0,0 +1,46 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override + +import ( + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + rbacv1 "k8s.io/api/rbac/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (o *Override) RoleBinding(object v1.Object, rb *rbacv1.RoleBinding, action resources.Action) error { + instance := object.(*current.IBPCA) + switch action { + case resources.Create: + return o.CreateRoleBinding(instance, rb) + case resources.Update: + return o.UpdateRoleBinding(instance, rb) + } + + return nil +} + +func (o *Override) CreateRoleBinding(instance *current.IBPCA, rb *rbacv1.RoleBinding) error { + return nil +} + +func (o *Override) UpdateRoleBinding(instance *current.IBPCA, rb *rbacv1.RoleBinding) error { + return nil +} diff --git a/pkg/offering/base/ca/override/service.go b/pkg/offering/base/ca/override/service.go new file mode 100644 index 00000000..4e9c4a39 --- /dev/null +++ b/pkg/offering/base/ca/override/service.go @@ -0,0 +1,80 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package override + +import ( + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/version" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (o *Override) Service(object v1.Object, service *corev1.Service, action resources.Action) error { + switch action { + case resources.Create: + return o.CreateService(object, service) + case resources.Update: + return o.UpdateService(object, service) + } + + return nil +} + +func (o *Override) CreateService(instance v1.Object, service *corev1.Service) error { + + switch instance.(type) { + case *current.IBPCA: + i := instance.(*current.IBPCA) + + if i.Status.Version == "" || i.Status.Version == version.V210 { + return o.CreateServiceV210(instance.(*current.IBPCA), service) + } else { + return o.CreateServiceV213(instance.(*current.IBPCA), service) + } + } + + return nil +} + +func (o *Override) CreateServiceV213(instance *current.IBPCA, service *corev1.Service) error { + if instance.Spec.Service != nil { + serviceType := instance.Spec.Service.Type + if serviceType != "" { + service.Spec.Type = serviceType + } + } + + return nil +} + +func (o *Override) CreateServiceV210(instance *current.IBPCA, service *corev1.Service) error { + if instance.Spec.Service != nil { + serviceType := instance.Spec.Service.Type + if serviceType != "" { + service.Spec.Type = serviceType + } + } + + return nil +} + +func (o *Override) UpdateService(instance v1.Object, service *corev1.Service) error { + return nil +} diff --git a/pkg/offering/base/ca/override/service_test.go b/pkg/offering/base/ca/override/service_test.go new file mode 100644 index 00000000..b6361184 --- /dev/null +++ b/pkg/offering/base/ca/override/service_test.go @@ -0,0 +1,71 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/k8s/ca/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" +) + +var _ = Describe("Service Overrides", func() { + var ( + overrider *override.Override + instance *current.IBPCA + service *corev1.Service + ) + + BeforeEach(func() { + var err error + + overrider = &override.Override{} + service, err = util.GetServiceFromFile("../../../../../definitions/ca/service.yaml") + Expect(err).NotTo(HaveOccurred()) + + instance = ¤t.IBPCA{ + ObjectMeta: metav1.ObjectMeta{ + Name: "override1", + Namespace: "namespace1", + }, + Spec: current.IBPCASpec{ + Service: ¤t.Service{ + Type: corev1.ServiceTypeClusterIP, + }, + }, + } + }) + + Context("creating a new service", func() { + It("overrides values in service, based on CA's instance spec", func() { + err := overrider.Service(instance, service, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + By("setting the service type", func() { + Expect(service.Spec.Type).To(Equal(corev1.ServiceTypeClusterIP)) + }) + }) + }) +}) diff --git a/pkg/offering/base/ca/override/serviceaccount.go b/pkg/offering/base/ca/override/serviceaccount.go new file mode 100644 index 00000000..800c4986 --- /dev/null +++ b/pkg/offering/base/ca/override/serviceaccount.go @@ -0,0 +1,58 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package override + +import ( + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (o *Override) ServiceAccount(object v1.Object, sa *corev1.ServiceAccount, action resources.Action) error { + instance := object.(*current.IBPCA) + switch action { + case resources.Create: + return o.CreateServiceAccount(instance, sa) + case resources.Update: + return o.UpdateServiceAccount(instance, sa) + } + + return nil +} + +func (o *Override) CreateServiceAccount(instance *current.IBPCA, sa *corev1.ServiceAccount) error { + return o.commonServiceAccount(instance, sa) +} + +func (o *Override) UpdateServiceAccount(instance *current.IBPCA, sa *corev1.ServiceAccount) error { + return o.commonServiceAccount(instance, sa) +} + +func (o *Override) commonServiceAccount(instance *current.IBPCA, sa *corev1.ServiceAccount) error { + for _, pullSecret := range instance.Spec.ImagePullSecrets { + imagePullSecret := corev1.LocalObjectReference{ + Name: pullSecret, + } + + sa.ImagePullSecrets = append(sa.ImagePullSecrets, imagePullSecret) + } + + return nil +} diff --git a/pkg/offering/base/ca/override/serviceaccount_test.go b/pkg/offering/base/ca/override/serviceaccount_test.go new file mode 100644 index 00000000..2a3806aa --- /dev/null +++ b/pkg/offering/base/ca/override/serviceaccount_test.go @@ -0,0 +1,69 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/k8s/ca/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" +) + +var _ = Describe("Service Account Overrides", func() { + var ( + overrider *override.Override + instance *current.IBPCA + sa *corev1.ServiceAccount + ) + + BeforeEach(func() { + var err error + + overrider = &override.Override{} + sa, err = util.GetServiceAccountFromFile("../../../../../definitions/ca/serviceaccount.yaml") + Expect(err).NotTo(HaveOccurred()) + + instance = ¤t.IBPCA{ + ObjectMeta: metav1.ObjectMeta{ + Name: "override1", + Namespace: "namespace1", + }, + Spec: current.IBPCASpec{ + ImagePullSecrets: []string{"pullsecret1"}, + }, + } + }) + + Context("creating a new service account", func() { + It("overrides values in service account, based on CA's instance spec", func() { + err := overrider.ServiceAccount(instance, sa, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + By("setting the image pull secret", func() { + Expect(sa.ImagePullSecrets[1].Name).To(Equal(instance.Spec.ImagePullSecrets[0])) + }) + }) + }) +}) diff --git a/pkg/offering/base/console/console.go b/pkg/offering/base/console/console.go new file mode 100644 index 00000000..a6dcccdd --- /dev/null +++ b/pkg/offering/base/console/console.go @@ -0,0 +1,682 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package baseconsole + +import ( + "context" + "fmt" + "os" + "strings" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + resourcemanager "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/manager" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/restart" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + "github.com/IBM-Blockchain/fabric-operator/version" + "github.com/pkg/errors" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + k8sruntime "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var log = logf.Log.WithName("base_console") + +type Override interface { + Deployment(v1.Object, *appsv1.Deployment, resources.Action) error + Service(v1.Object, *corev1.Service, resources.Action) error + DeployerService(v1.Object, *corev1.Service, resources.Action) error + ServiceAccount(v1.Object, *corev1.ServiceAccount, resources.Action) error + PVC(v1.Object, *corev1.PersistentVolumeClaim, resources.Action) error + CM(v1.Object, *corev1.ConfigMap, resources.Action, map[string]interface{}) error + ConsoleCM(v1.Object, *corev1.ConfigMap, resources.Action, map[string]interface{}) error + DeployerCM(v1.Object, *corev1.ConfigMap, resources.Action, map[string]interface{}) error +} + +//go:generate counterfeiter -o mocks/update.go -fake-name Update . Update + +type Update interface { + SpecUpdated() bool + DeployerCMUpdated() bool + ConsoleCMUpdated() bool + EnvCMUpdated() bool + RestartNeeded() bool +} + +//go:generate counterfeiter -o mocks/restart_manager.go -fake-name RestartManager . 
RestartManager + +type RestartManager interface { + ForConfigMapUpdate(instance v1.Object) error + TriggerIfNeeded(instance restart.Instance) error + ForRestartAction(instance v1.Object) error +} + +type IBPConsole interface { + PreReconcileChecks(instance *current.IBPConsole) (bool, error) + CheckStates(instance *current.IBPConsole, update bool) error + ReconcileManagers(instance *current.IBPConsole, update bool) error + Reconcile(instance *current.IBPConsole, update Update) (common.Result, error) +} + +var _ IBPConsole = &Console{} + +type Console struct { + Client k8sclient.Client + Scheme *runtime.Scheme + Config *config.Config + + DeploymentManager resources.Manager + ServiceManager resources.Manager + DeployerServiceManager resources.Manager + PVCManager resources.Manager + ConfigMapManager resources.Manager + ConsoleConfigMapManager resources.Manager + DeployerConfigMapManager resources.Manager + RoleManager resources.Manager + RoleBindingManager resources.Manager + ServiceAccountManager resources.Manager + + Override Override + + Restart RestartManager +} + +func New(client k8sclient.Client, scheme *runtime.Scheme, config *config.Config, o Override) *Console { + console := &Console{ + Client: client, + Scheme: scheme, + Config: config, + Override: o, + Restart: restart.New(client, config.Operator.Restart.WaitTime.Get(), config.Operator.Restart.Timeout.Get()), + } + + console.CreateManagers() + return console +} + +func (c *Console) CreateManagers() { + options := map[string]interface{}{} + + options["userid"] = util.GenerateRandomString(10) + options["password"] = util.GenerateRandomString(10) + + consoleConfig := c.Config.ConsoleInitConfig + + override := c.Override + resourceManager := resourcemanager.New(c.Client, c.Scheme) + c.DeploymentManager = resourceManager.CreateDeploymentManager("", override.Deployment, c.GetLabels, consoleConfig.DeploymentFile) + c.ServiceManager = resourceManager.CreateServiceManager("", override.Service, c.GetLabels, consoleConfig.ServiceFile) + c.DeployerServiceManager = resourceManager.CreateServiceManager("", override.Service, c.GetLabels, consoleConfig.DeployerServiceFile) + c.PVCManager = resourceManager.CreatePVCManager("", override.PVC, c.GetLabels, consoleConfig.PVCFile) + c.ConfigMapManager = resourceManager.CreateConfigMapManager("", override.CM, c.GetLabels, consoleConfig.CMFile, nil) + c.ConsoleConfigMapManager = resourceManager.CreateConfigMapManager("console", override.ConsoleCM, c.GetLabels, consoleConfig.ConsoleCMFile, options) + c.DeployerConfigMapManager = resourceManager.CreateConfigMapManager("deployer", override.DeployerCM, c.GetLabels, consoleConfig.DeployerCMFile, options) + c.RoleManager = resourceManager.CreateRoleManager("", nil, c.GetLabels, consoleConfig.RoleFile) + c.RoleBindingManager = resourceManager.CreateRoleBindingManager("", nil, c.GetLabels, consoleConfig.RoleBindingFile) + c.ServiceAccountManager = resourceManager.CreateServiceAccountManager("", nil, c.GetLabels, consoleConfig.ServiceAccountFile) +} + +func (c *Console) PreReconcileChecks(instance *current.IBPConsole) (bool, error) { + var maxNameLength *int + if instance.Spec.ConfigOverride != nil { + maxNameLength = instance.Spec.ConfigOverride.MaxNameLength + } + err := util.ValidationChecks(instance.TypeMeta, instance.ObjectMeta, "IBPConsole", maxNameLength) + if err != nil { + return false, err + } + + // check if all required values are passed + err = c.ValidateSpec(instance) + if err != nil { + return false, err + } + + zoneUpdated, err := 
c.SelectZone(instance) + if err != nil { + return false, err + } + + regionUpdated, err := c.SelectRegion(instance) + if err != nil { + return false, err + } + + passwordUpdated, err := c.CreatePasswordSecretIfRequired(instance) + if err != nil { + return false, err + } + + kubeconfigUpdated, err := c.CreateKubernetesSecretIfRequired(instance) + if err != nil { + return false, err + } + + connectionStringUpdated := c.CreateCouchdbCredentials(instance) + + update := passwordUpdated || zoneUpdated || regionUpdated || kubeconfigUpdated || connectionStringUpdated + + if update { + log.Info(fmt.Sprintf("passwordUpdated %t, zoneUpdated %t, regionUpdated %t, kubeconfigUpdated %t, connectionstringUpdated %t", + passwordUpdated, zoneUpdated, regionUpdated, kubeconfigUpdated, connectionStringUpdated)) + } + + return update, err +} + +func (c *Console) Reconcile(instance *current.IBPConsole, update Update) (common.Result, error) { + var err error + + versionSet, err := c.SetVersion(instance) + if err != nil { + return common.Result{}, errors.Wrap(err, fmt.Sprintf("failed updating CR '%s' to version '%s'", instance.Name, version.Operator)) + } + if versionSet { + log.Info("Instance version updated, requeuing request...") + return common.Result{ + Result: reconcile.Result{ + Requeue: true, + }, + }, nil + } + + instanceUpdated, err := c.PreReconcileChecks(instance) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed pre reconcile checks") + } + + if instanceUpdated { + log.Info("Updating instance after pre reconcile checks") + err := c.Client.Patch(context.TODO(), instance, nil, k8sclient.PatchOption{ + Resilient: &k8sclient.ResilientPatch{ + Retry: 3, + Into: ¤t.IBPConsole{}, + Strategy: client.MergeFrom, + }, + }) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to update instance") + } + + log.Info("Instance updated, requeuing request...") + return common.Result{ + Result: reconcile.Result{ + Requeue: true, + }, + }, nil + } + + log.Info("Reconciling managers ...") + err = c.ReconcileManagers(instance, update.SpecUpdated()) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to reconcile managers") + } + + err = c.CheckStates(instance, update.SpecUpdated()) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to check and restore state") + } + + err = c.CheckForConfigMapUpdates(instance, update) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to check for config map updates") + } + + err = c.HandleActions(instance, update) + if err != nil { + return common.Result{}, err + } + + if err := c.HandleRestart(instance, update); err != nil { + return common.Result{}, err + } + + return common.Result{}, nil +} + +func (c *Console) SetVersion(instance *current.IBPConsole) (bool, error) { + if instance.Status.Version == "" || !version.String(instance.Status.Version).Equal(version.Operator) { + log.Info("Version of Operator: ", "version", version.Operator) + log.Info("Version of CR: ", "version", instance.Status.Version) + log.Info(fmt.Sprintf("Setting '%s' to version '%s'", instance.Name, version.Operator)) + + instance.Status.Version = version.Operator + err := c.Client.PatchStatus(context.TODO(), instance, nil, k8sclient.PatchOption{ + Resilient: &k8sclient.ResilientPatch{ + Retry: 3, + Into: ¤t.IBPConsole{}, + Strategy: client.MergeFrom, + }, + }) + if err != nil { + return false, err + } + return true, nil + } + return false, nil +} + +func (c *Console) ReconcileManagers(instance *current.IBPConsole, update bool) 
error { + var err error + + if strings.Contains(instance.Spec.ConnectionString, "localhost") || instance.Spec.ConnectionString == "" { + err = c.PVCManager.Reconcile(instance, update) + if err != nil { + return errors.Wrap(err, "failed PVC reconciliation") + } + } + + err = c.ServiceManager.Reconcile(instance, update) + if err != nil { + return errors.Wrap(err, "failed Service reconciliation") + } + + if instance.Spec.FeatureFlags != nil && instance.Spec.FeatureFlags.DevMode { + c.DeployerServiceManager.SetCustomName(instance.GetName() + "-deployer-" + instance.Namespace) + err = c.DeployerServiceManager.Reconcile(instance, update) + if err != nil { + return errors.Wrap(err, "failed Deployer Service reconciliation") + } + } + + err = c.ReconcileRBAC(instance) + if err != nil { + return errors.Wrap(err, "failed RBAC reconciliation") + } + + err = c.ConfigMapManager.Reconcile(instance, update) + if err != nil { + return errors.Wrap(err, "failed ConfigMap reconciliation") + } + + err = c.DeployerConfigMapManager.Reconcile(instance, update) + if err != nil { + return errors.Wrap(err, "failed Deployer ConfigMap reconciliation") + } + + err = c.ConsoleConfigMapManager.Reconcile(instance, update) + if err != nil { + return errors.Wrap(err, "failed Console ConfigMap reconciliation") + } + + err = c.DeploymentManager.Reconcile(instance, update) + if err != nil { + return errors.Wrap(err, "failed Deployment reconciliation") + } + + return nil +} + +func (c *Console) ReconcileRBAC(instance *current.IBPConsole) error { + var err error + + err = c.RoleManager.Reconcile(instance, false) + if err != nil { + return err + } + + err = c.RoleBindingManager.Reconcile(instance, false) + if err != nil { + return err + } + + err = c.ServiceAccountManager.Reconcile(instance, false) + if err != nil { + return err + } + + return nil +} + +func (c *Console) CheckStates(instance *current.IBPConsole, update bool) error { + // Don't need to check state if the state is being updated via CR. State needs + // to be checked if operator detects changes to a resources that was not triggered + // via CR. 
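// (For example, a manual edit to the console Deployment would be caught here:
// CheckState is expected to detect the drift and RestoreState to re-apply the
// operator-managed definition, which is what the DeploymentManager calls below do.)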
+ if c.DeploymentManager.Exists(instance) { + err := c.DeploymentManager.CheckState(instance) + if err != nil { + log.Info(fmt.Sprintf("unexpected state found for deployment, restoring state: %s", err.Error())) + err = c.DeploymentManager.RestoreState(instance) + if err != nil { + return err + } + } + } + + return nil +} + +func (c *Console) SelectZone(instance *current.IBPConsole) (bool, error) { + if instance.Spec.Zone == "select" { + zone := util.GetZone(c.Client) + instance.Spec.Zone = zone + return true, nil + } + if instance.Spec.Zone != "" { + err := util.ValidateZone(c.Client, instance.Spec.Zone) + if err != nil { + return false, err + } + } + return false, nil +} + +func (c *Console) SelectRegion(instance *current.IBPConsole) (bool, error) { + if instance.Spec.Region == "select" { + region := util.GetRegion(c.Client) + instance.Spec.Region = region + return true, nil + } + if instance.Spec.Region != "" { + err := util.ValidateRegion(c.Client, instance.Spec.Region) + if err != nil { + return false, err + } + } + return false, nil +} + +func (c *Console) CreatePasswordSecretIfRequired(instance *current.IBPConsole) (bool, error) { + namespace := instance.Namespace + passwordSecretName := instance.Spec.PasswordSecretName + password := instance.Spec.Password + + authscheme := instance.Spec.AuthScheme + + // if password is blank and passwordSecret is set + if password == "" && passwordSecretName != "" { + userSecret := &corev1.Secret{} + err := c.Client.Get(context.TODO(), types.NamespacedName{Name: passwordSecretName, Namespace: namespace}, userSecret) + if err != nil { + return false, errors.Wrap(err, "failed to get provided console password secret, password is blank & secret is set") + } + return false, nil + } + + if passwordSecretName == "" && authscheme == "ibmid" { + password = "unused" + } + + if password == "" && passwordSecretName == "" { + return false, errors.New("both password and password secret are NOT set") + } + + if passwordSecretName == "" && password != "" { + passwordSecretName = instance.Name + "-console-pw" + err := c.CreateUserSecret(instance, passwordSecretName, password) + if err != nil { + return false, errors.Wrap(err, "failed to create user secret") + } else { + instance.Spec.Password = "" + instance.Spec.PasswordSecretName = passwordSecretName + return true, nil + } + } + + return false, nil + +} + +func (c *Console) CreateKubernetesSecretIfRequired(instance *current.IBPConsole) (bool, error) { + namespace := instance.Namespace + kubeconfigsecretname := instance.Spec.KubeconfigSecretName + kubeconfig := instance.Spec.Kubeconfig + + // if password is blank and passwordSecret is set + if kubeconfig == nil && kubeconfigsecretname != "" { + kubeconfigSecret := &corev1.Secret{} + err := c.Client.Get(context.TODO(), types.NamespacedName{Name: kubeconfigsecretname, Namespace: namespace}, kubeconfigSecret) + if err != nil { + return false, errors.Wrap(err, "failed to get kubeconifg secret") + } + return false, nil + } + + if kubeconfigsecretname == "" && kubeconfig != nil && string(*kubeconfig) != "" { + kubeconfigsecretname = instance.Name + "-kubeconfig" + err := c.CreateKubeconfigSecret(instance, kubeconfigsecretname, kubeconfig) + if err != nil { + return false, errors.Wrap(err, "failed to create kubeconfig secret") + } else { + empty := make([]byte, 0) + instance.Spec.Kubeconfig = &empty + instance.Spec.KubeconfigSecretName = kubeconfigsecretname + return true, nil + } + } + + if kubeconfig != nil && string(*kubeconfig) != "" && kubeconfigsecretname != "" { + 
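// Both an inline kubeconfig and a kubeconfig secret name were supplied; treat
// this as ambiguous configuration and return an error rather than guessing
// which of the two should win.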
return false, errors.New("both kubeconfig and kubeconfig secret name are set") + } + + return false, nil +} + +func (c *Console) CreateKubeconfigSecret(instance *current.IBPConsole, kubeocnfigSecretName string, kubeconfig *[]byte) error { + kubeconfigSecret := &corev1.Secret{} + kubeconfigSecret.Name = kubeocnfigSecretName + kubeconfigSecret.Namespace = instance.Namespace + kubeconfigSecret.Labels = c.GetLabels(instance) + + kubeconfigSecret.Data = map[string][]byte{} + kubeconfigSecret.Data["kubeconfig.yaml"] = *kubeconfig + + err := c.Client.Create(context.TODO(), kubeconfigSecret, k8sclient.CreateOption{Owner: instance, Scheme: c.Scheme}) + if err != nil { + return err + } + + return nil +} + +func (c *Console) CreateUserSecret(instance *current.IBPConsole, passwordSecretName, password string) error { + userSecret := &corev1.Secret{} + userSecret.Name = passwordSecretName + userSecret.Namespace = instance.Namespace + userSecret.Labels = c.GetLabels(instance) + + userSecret.Data = map[string][]byte{} + userSecret.Data["password"] = []byte(password) + + err := c.Client.Create(context.TODO(), userSecret, k8sclient.CreateOption{Owner: instance, Scheme: c.Scheme}) + if err != nil { + return err + } + + return nil +} + +func (c *Console) ValidateSpec(instance *current.IBPConsole) error { + if instance.Spec.NetworkInfo == nil { + return errors.New("network information not provided") + } + + if instance.Spec.NetworkInfo.Domain == "" { + return errors.New("domain not provided in network information") + } + + if !instance.Spec.License.Accept { + return errors.New("user must accept license before continuing") + } + + if instance.Spec.ServiceAccountName == "" { + return errors.New("Service account name not provided") + } + + if instance.Spec.Email == "" { + return errors.New("email not provided") + } + + if instance.Spec.AuthScheme != "ibmid" && instance.Spec.Password == "" && instance.Spec.PasswordSecretName == "" { + return errors.New("password and passwordSecretName both not provided, at least one expected") + } + + if instance.Spec.ImagePullSecrets == nil || len(instance.Spec.ImagePullSecrets) == 0 { + return errors.New("imagepullsecrets required") + } + + if instance.Spec.RegistryURL != "" && !strings.HasSuffix(instance.Spec.RegistryURL, "/") { + instance.Spec.RegistryURL = instance.Spec.RegistryURL + "/" + } + + return nil +} + +func (c *Console) GetLabels(instance v1.Object) map[string]string { + label := os.Getenv("OPERATOR_LABEL_PREFIX") + if label == "" { + label = "fabric" + } + + return map[string]string{ + "app": instance.GetName(), + "creator": label, + "release": "operator", + "helm.sh/chart": "ibm-" + label, + "app.kubernetes.io/name": label, + "app.kubernetes.io/instance": label + "console", + "app.kubernetes.io/managed-by": label + "-operator", + } +} + +func (c *Console) HandleActions(instance *current.IBPConsole, update Update) error { + orig := instance.DeepCopy() + + if update.RestartNeeded() { + if err := c.Restart.ForRestartAction(instance); err != nil { + return errors.Wrap(err, "failed to restart console pods") + } + instance.ResetRestart() + } + + if err := c.Client.Patch(context.TODO(), instance, client.MergeFrom(orig)); err != nil { + return errors.Wrap(err, "failed to reset action flag") + } + + return nil +} + +func (c *Console) CreateCouchdbCredentials(instance *current.IBPConsole) bool { + if instance.Spec.ConnectionString != "" && instance.Spec.ConnectionString != "http://localhost:5984" { + return false + } + + couchdbUser := util.GenerateRandomString(32) + 
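// The generated 32-character credentials are embedded in the local CouchDB URL
// built below (http://<user>:<password>@localhost:5984); the guard at the top of
// this function keeps an already-customized connection string from being
// regenerated on later reconciles.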
couchdbPassword := util.GenerateRandomString(32) + connectionString := fmt.Sprintf("http://%s:%s@localhost:5984", couchdbUser, couchdbPassword) + instance.Spec.ConnectionString = connectionString + // TODO save deployer docs for SW? + // instance.Spec.Deployer.ConnectionString = connectionString + + return true +} + +func (c *Console) CheckForConfigMapUpdates(instance *current.IBPConsole, update Update) error { + if update.DeployerCMUpdated() || update.ConsoleCMUpdated() || update.EnvCMUpdated() { + err := c.Restart.ForConfigMapUpdate(instance) + if err != nil { + return errors.Wrap(err, "failed to update restart config") + } + } + + return nil +} + +func (c *Console) HandleRestart(instance *current.IBPConsole, update Update) error { + // If restart is disabled for components, can return immediately + if c.Config.Operator.Restart.Disable.Components { + return nil + } + + err := c.Restart.TriggerIfNeeded(instance) + if err != nil { + return errors.Wrap(err, "failed to restart deployment") + } + + return nil +} + +func (c *Console) NetworkPolicyReconcile(instance *current.IBPConsole) error { + if c.Config.Operator.Console.ApplyNetworkPolicy == "" || c.Config.Operator.Console.ApplyNetworkPolicy == "false" { + return nil + } + + log.Info("IBPOPERATOR_CONSOLE_APPLYNETWORKPOLICY set applying network policy") + err := c.CreateNetworkPolicyIfNotExists(instance, c.Config.ConsoleInitConfig.NetworkPolicyIngressFile, instance.GetName()+"-ingress") + if err != nil { + log.Error(err, "Cannot install ingress network policy") + } + + err = c.CreateNetworkPolicyIfNotExists(instance, c.Config.ConsoleInitConfig.NetworkPolicyDenyAllFile, instance.GetName()+"-denyall") + if err != nil { + log.Error(err, "Cannot install denyall network policy") + } + + return nil +} + +func (c *Console) CreateNetworkPolicyIfNotExists(instance *current.IBPConsole, filename string, policyname string) error { + policy, err := util.GetNetworkPolicyFromFile(filename) + if err != nil { + return err + } + + policy.Namespace = instance.Namespace + policy.Name = policyname + policy.Spec.PodSelector.MatchLabels = c.GetLabelsForNetworkPolicy(instance) + + newPolicy := policy.DeepCopy() + err = c.Client.Get(context.TODO(), types.NamespacedName{Namespace: instance.Namespace, Name: instance.GetName()}, newPolicy) + if err != nil { + if k8sruntime.IgnoreNotFound(err) == nil { + log.Info("network policy not found, applying now") + err1 := c.Client.Create(context.TODO(), policy, k8sclient.CreateOption{Owner: instance, Scheme: c.Scheme}) + if err1 != nil { + log.Error(err1, "Error applying network policy") + } + } else { + log.Error(err, "Error getting network policy") + return nil + } + } else { + log.Info("network policy found, not applying") + return nil + } + return nil +} + +func (c *Console) GetLabelsForNetworkPolicy(instance v1.Object) map[string]string { + label := os.Getenv("OPERATOR_LABEL_PREFIX") + if label == "" { + label = "fabric" + } + + return map[string]string{ + "app.kubernetes.io/name": label, + } +} diff --git a/pkg/offering/base/console/console_suite_test.go b/pkg/offering/base/console/console_suite_test.go new file mode 100644 index 00000000..413b7752 --- /dev/null +++ b/pkg/offering/base/console/console_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package baseconsole_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestConsole(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Console Suite") +} diff --git a/pkg/offering/base/console/console_test.go b/pkg/offering/base/console/console_test.go new file mode 100644 index 00000000..9f13a525 --- /dev/null +++ b/pkg/offering/base/console/console_test.go @@ -0,0 +1,342 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package baseconsole_test + +import ( + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + cmocks "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + consolev1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/console/v1" + managermocks "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/mocks" + baseconsole "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/console" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/console/mocks" + "github.com/IBM-Blockchain/fabric-operator/version" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +var _ = Describe("Base Console", func() { + var ( + console *baseconsole.Console + instance *current.IBPConsole + mockKubeClient *cmocks.Client + + deploymentMgr *managermocks.ResourceManager + serviceMgr *managermocks.ResourceManager + deployerServiceMgr *managermocks.ResourceManager + pvcMgr *managermocks.ResourceManager + configMapMgr *managermocks.ResourceManager + consoleConfigMapMgr *managermocks.ResourceManager + deployerConfigMapMgr *managermocks.ResourceManager + roleMgr *managermocks.ResourceManager + roleBindingMgr *managermocks.ResourceManager + serviceAccountMgr *managermocks.ResourceManager + update *mocks.Update + restartMgr *mocks.RestartManager + ) + + BeforeEach(func() { + logf.SetLogger(zap.New()) + mockKubeClient = &cmocks.Client{} + update = &mocks.Update{} + restartMgr = &mocks.RestartManager{} + + deploymentMgr = &managermocks.ResourceManager{} + serviceMgr = &managermocks.ResourceManager{} + deployerServiceMgr = &managermocks.ResourceManager{} + pvcMgr = &managermocks.ResourceManager{} + configMapMgr = &managermocks.ResourceManager{} + consoleConfigMapMgr = &managermocks.ResourceManager{} + deployerConfigMapMgr = &managermocks.ResourceManager{} + roleMgr = &managermocks.ResourceManager{} + roleBindingMgr = &managermocks.ResourceManager{} + serviceAccountMgr = &managermocks.ResourceManager{} + + instance = ¤t.IBPConsole{ + Spec: current.IBPConsoleSpec{ + License: current.License{ + Accept: true, + }, + ServiceAccountName: "test", + AuthScheme: "couchdb", + DeployerTimeout: 30000, + Components: "athena-components", + Sessions: "athena-sessions", + System: "athena-system", + Service: ¤t.Service{}, + Email: "xyz@ibm.com", + Password: "cGFzc3dvcmQ=", + SystemChannel: "testchainid", + ImagePullSecrets: []string{"testsecret"}, + RegistryURL: "ghcr.io/ibm-blockchain/ibp-temp/", + Kubeconfig: &[]byte{}, + ConnectionString: "https://localhost", + Images: ¤t.ConsoleImages{ + ConsoleInitImage: "fake-init-image", + ConsoleInitTag: "1234", + CouchDBImage: "fake-couchdb-image", + CouchDBTag: "1234", + ConsoleImage: "fake-console-image", + ConsoleTag: "1234", + ConfigtxlatorImage: "fake-configtxlator-image", + ConfigtxlatorTag: "1234", + DeployerImage: "fake-deployer-image", + DeployerTag: "1234", + }, + NetworkInfo: ¤t.NetworkInfo{ + Domain: "test.domain", + ConsolePort: 31010, + ProxyPort: 31011, + }, + TLSSecretName: "secret", + Resources: ¤t.ConsoleResources{}, + Storage: ¤t.ConsoleStorage{ + Console: ¤t.StorageSpec{ + Size: "100m", + Class: "manual", + }, + }, + Versions: ¤t.Versions{}, + PasswordSecretName: "password", + }, + } + instance.Kind = "IBPConsole" + instance.Status.Version = version.Operator + + console = &baseconsole.Console{ + Client: mockKubeClient, + Scheme: &runtime.Scheme{}, + Config: &config.Config{}, + + DeploymentManager: deploymentMgr, + ServiceManager: serviceMgr, + DeployerServiceManager: deployerServiceMgr, + PVCManager: pvcMgr, + ConfigMapManager: configMapMgr, + ConsoleConfigMapManager: consoleConfigMapMgr, + DeployerConfigMapManager: deployerConfigMapMgr, + RoleManager: roleMgr, + RoleBindingManager: roleBindingMgr, + ServiceAccountManager: serviceAccountMgr, + Restart: restartMgr, + } + }) + + Context("Reconciles", func() { + It("returns nil and request will be requeued if instance version is updated", func() { + instance.Status.Version = "" + _, err := 
console.Reconcile(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(mockKubeClient.PatchStatusCallCount()).To(Equal(1)) + }) + It("returns an error if pvc manager fails to reconcile", func() { + pvcMgr.ReconcileReturns(errors.New("failed to reconcile pvc")) + _, err := console.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed PVC reconciliation: failed to reconcile pvc")) + }) + + It("returns an error if service manager fails to reconcile", func() { + serviceMgr.ReconcileReturns(errors.New("failed to reconcile service")) + _, err := console.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed Service reconciliation: failed to reconcile service")) + }) + + It("returns no error if dev mode is disabled & deployer service manager fails to reconcile", func() { + deployerServiceMgr.ReconcileReturns(errors.New("failed to reconcile service")) + _, err := console.Reconcile(instance, update) + Expect(err).NotTo(HaveOccurred()) + }) + + It("returns an error if deployer service manager fails to reconcile", func() { + instance.Spec.FeatureFlags = &consolev1.FeatureFlags{ + DevMode: true, + } + deployerServiceMgr.ReconcileReturns(errors.New("failed to reconcile service")) + _, err := console.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed Deployer Service reconciliation: failed to reconcile service")) + }) + + It("returns an error if deployment manager fails to reconcile", func() { + deploymentMgr.ReconcileReturns(errors.New("failed to reconcile deployment")) + _, err := console.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed Deployment reconciliation: failed to reconcile deployment")) + }) + + It("returns an error if role manager fails to reconcile", func() { + roleMgr.ReconcileReturns(errors.New("failed to reconcile role")) + _, err := console.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed RBAC reconciliation: failed to reconcile role")) + }) + + It("returns an error if role binding manager fails to reconcile", func() { + roleBindingMgr.ReconcileReturns(errors.New("failed to reconcile role binding")) + _, err := console.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed RBAC reconciliation: failed to reconcile role binding")) + }) + + It("returns an error if service account binding manager fails to reconcile", func() { + serviceAccountMgr.ReconcileReturns(errors.New("failed to reconcile service account")) + _, err := console.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed RBAC reconciliation: failed to reconcile service account")) + }) + + It("returns an error if config map manager fails to reconcile", func() { + configMapMgr.ReconcileReturns(errors.New("failed to reconcile config map")) + _, err := console.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed ConfigMap reconciliation: failed to reconcile config map")) + }) + + It("returns an error if config map manager fails to reconcile", func() { + consoleConfigMapMgr.ReconcileReturns(errors.New("failed to 
reconcile config map")) + _, err := console.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed Console ConfigMap reconciliation: failed to reconcile config map")) + }) + + It("returns an error if config map manager fails to reconcile", func() { + deployerConfigMapMgr.ReconcileReturns(errors.New("failed to reconcile config map")) + _, err := console.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed Deployer ConfigMap reconciliation: failed to reconcile config map")) + }) + + It("restarts pods by deleting deployment", func() { + update.RestartNeededReturns(true) + _, err := console.Reconcile(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(mockKubeClient.PatchCallCount()).To(Equal(1)) + }) + + It("returns error if trigger restart fails", func() { + restartMgr.TriggerIfNeededReturns(errors.New("failed to trigger restart")) + _, err := console.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to restart deployment: failed to trigger restart")) + }) + + It("does not return an error on a successful reconcile", func() { + _, err := console.Reconcile(instance, update) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("ValidateSpec", func() { + It("returns no error if valid spec is passed", func() { + err := console.ValidateSpec(instance) + Expect(err).NotTo(HaveOccurred()) + }) + + It("returns error if license is not accepted", func() { + instance.Spec.License.Accept = false + err := console.ValidateSpec(instance) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("user must accept license before continuing")) + }) + + It("returns error if serviceaccountname is not passed", func() { + instance.Spec.ServiceAccountName = "" + err := console.ValidateSpec(instance) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("Service account name not provided")) + }) + + It("returns error if email is not passed", func() { + instance.Spec.Email = "" + err := console.ValidateSpec(instance) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("email not provided")) + }) + + It("returns error if password & passwordsecret are not passed", func() { + instance.Spec.PasswordSecretName = "" + instance.Spec.Password = "" + err := console.ValidateSpec(instance) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("password and passwordSecretName both not provided, at least one expected")) + }) + + It("should not return error if password & passwordsecret are not passed when authscheme is ibmid", func() { + instance.Spec.AuthScheme = "ibmid" + instance.Spec.PasswordSecretName = "" + instance.Spec.Password = "" + err := console.ValidateSpec(instance) + Expect(err).ToNot(HaveOccurred()) + }) + + It("returns error if imagepullsecret is not passed", func() { + instance.Spec.ImagePullSecrets = []string{} + err := console.ValidateSpec(instance) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("imagepullsecrets required")) + }) + + It("returns error if ingress info are not passed", func() { + instance.Spec.NetworkInfo = nil + err := console.ValidateSpec(instance) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("network information not provided")) + }) + }) + + Context("CreateCouchdbCredentials", func() { + It("does not update connectionstring if it is not blank", func() { + connectionString := "https://fake.url" + 
instance.Spec.ConnectionString = connectionString + updated := console.CreateCouchdbCredentials(instance) + Expect(updated).To(BeFalse()) + Expect(instance.Spec.ConnectionString).To(Equal(connectionString)) + }) + + It("does not update connectionstring if it is not blank & is https", func() { + connectionString := "https://localhost:5984" + instance.Spec.ConnectionString = connectionString + updated := console.CreateCouchdbCredentials(instance) + Expect(updated).To(BeFalse()) + Expect(instance.Spec.ConnectionString).To(Equal(connectionString)) + }) + + It("does update connectionstring if it is missing creds", func() { + connectionString := "http://localhost:5984" + instance.Spec.ConnectionString = connectionString + updated := console.CreateCouchdbCredentials(instance) + Expect(updated).To(BeTrue()) + Expect(instance.Spec.ConnectionString).NotTo(Equal(connectionString)) + }) + + It("does not update connectionstring if it is has creds already", func() { + connectionString := "http://user:pass@localhost:5984" + instance.Spec.ConnectionString = connectionString + updated := console.CreateCouchdbCredentials(instance) + Expect(updated).To(BeFalse()) + Expect(instance.Spec.ConnectionString).To(Equal(connectionString)) + }) + }) +}) diff --git a/pkg/offering/base/console/mocks/restart_manager.go b/pkg/offering/base/console/mocks/restart_manager.go new file mode 100644 index 00000000..8086c3e7 --- /dev/null +++ b/pkg/offering/base/console/mocks/restart_manager.go @@ -0,0 +1,261 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package mocks + +import ( + "sync" + + baseconsole "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/console" + "github.com/IBM-Blockchain/fabric-operator/pkg/restart" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type RestartManager struct { + ForConfigMapUpdateStub func(v1.Object) error + forConfigMapUpdateMutex sync.RWMutex + forConfigMapUpdateArgsForCall []struct { + arg1 v1.Object + } + forConfigMapUpdateReturns struct { + result1 error + } + forConfigMapUpdateReturnsOnCall map[int]struct { + result1 error + } + ForRestartActionStub func(v1.Object) error + forRestartActionMutex sync.RWMutex + forRestartActionArgsForCall []struct { + arg1 v1.Object + } + forRestartActionReturns struct { + result1 error + } + forRestartActionReturnsOnCall map[int]struct { + result1 error + } + TriggerIfNeededStub func(restart.Instance) error + triggerIfNeededMutex sync.RWMutex + triggerIfNeededArgsForCall []struct { + arg1 restart.Instance + } + triggerIfNeededReturns struct { + result1 error + } + triggerIfNeededReturnsOnCall map[int]struct { + result1 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *RestartManager) ForConfigMapUpdate(arg1 v1.Object) error { + fake.forConfigMapUpdateMutex.Lock() + ret, specificReturn := fake.forConfigMapUpdateReturnsOnCall[len(fake.forConfigMapUpdateArgsForCall)] + fake.forConfigMapUpdateArgsForCall = append(fake.forConfigMapUpdateArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.ForConfigMapUpdateStub + fakeReturns := fake.forConfigMapUpdateReturns + fake.recordInvocation("ForConfigMapUpdate", []interface{}{arg1}) + fake.forConfigMapUpdateMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *RestartManager) ForConfigMapUpdateCallCount() int { + fake.forConfigMapUpdateMutex.RLock() + defer fake.forConfigMapUpdateMutex.RUnlock() + return 
len(fake.forConfigMapUpdateArgsForCall) +} + +func (fake *RestartManager) ForConfigMapUpdateCalls(stub func(v1.Object) error) { + fake.forConfigMapUpdateMutex.Lock() + defer fake.forConfigMapUpdateMutex.Unlock() + fake.ForConfigMapUpdateStub = stub +} + +func (fake *RestartManager) ForConfigMapUpdateArgsForCall(i int) v1.Object { + fake.forConfigMapUpdateMutex.RLock() + defer fake.forConfigMapUpdateMutex.RUnlock() + argsForCall := fake.forConfigMapUpdateArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *RestartManager) ForConfigMapUpdateReturns(result1 error) { + fake.forConfigMapUpdateMutex.Lock() + defer fake.forConfigMapUpdateMutex.Unlock() + fake.ForConfigMapUpdateStub = nil + fake.forConfigMapUpdateReturns = struct { + result1 error + }{result1} +} + +func (fake *RestartManager) ForConfigMapUpdateReturnsOnCall(i int, result1 error) { + fake.forConfigMapUpdateMutex.Lock() + defer fake.forConfigMapUpdateMutex.Unlock() + fake.ForConfigMapUpdateStub = nil + if fake.forConfigMapUpdateReturnsOnCall == nil { + fake.forConfigMapUpdateReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.forConfigMapUpdateReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *RestartManager) ForRestartAction(arg1 v1.Object) error { + fake.forRestartActionMutex.Lock() + ret, specificReturn := fake.forRestartActionReturnsOnCall[len(fake.forRestartActionArgsForCall)] + fake.forRestartActionArgsForCall = append(fake.forRestartActionArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.ForRestartActionStub + fakeReturns := fake.forRestartActionReturns + fake.recordInvocation("ForRestartAction", []interface{}{arg1}) + fake.forRestartActionMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *RestartManager) ForRestartActionCallCount() int { + fake.forRestartActionMutex.RLock() + defer fake.forRestartActionMutex.RUnlock() + return len(fake.forRestartActionArgsForCall) +} + +func (fake *RestartManager) ForRestartActionCalls(stub func(v1.Object) error) { + fake.forRestartActionMutex.Lock() + defer fake.forRestartActionMutex.Unlock() + fake.ForRestartActionStub = stub +} + +func (fake *RestartManager) ForRestartActionArgsForCall(i int) v1.Object { + fake.forRestartActionMutex.RLock() + defer fake.forRestartActionMutex.RUnlock() + argsForCall := fake.forRestartActionArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *RestartManager) ForRestartActionReturns(result1 error) { + fake.forRestartActionMutex.Lock() + defer fake.forRestartActionMutex.Unlock() + fake.ForRestartActionStub = nil + fake.forRestartActionReturns = struct { + result1 error + }{result1} +} + +func (fake *RestartManager) ForRestartActionReturnsOnCall(i int, result1 error) { + fake.forRestartActionMutex.Lock() + defer fake.forRestartActionMutex.Unlock() + fake.ForRestartActionStub = nil + if fake.forRestartActionReturnsOnCall == nil { + fake.forRestartActionReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.forRestartActionReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *RestartManager) TriggerIfNeeded(arg1 restart.Instance) error { + fake.triggerIfNeededMutex.Lock() + ret, specificReturn := fake.triggerIfNeededReturnsOnCall[len(fake.triggerIfNeededArgsForCall)] + fake.triggerIfNeededArgsForCall = append(fake.triggerIfNeededArgsForCall, struct { + arg1 restart.Instance + }{arg1}) + stub := fake.TriggerIfNeededStub + fakeReturns := 
fake.triggerIfNeededReturns + fake.recordInvocation("TriggerIfNeeded", []interface{}{arg1}) + fake.triggerIfNeededMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *RestartManager) TriggerIfNeededCallCount() int { + fake.triggerIfNeededMutex.RLock() + defer fake.triggerIfNeededMutex.RUnlock() + return len(fake.triggerIfNeededArgsForCall) +} + +func (fake *RestartManager) TriggerIfNeededCalls(stub func(restart.Instance) error) { + fake.triggerIfNeededMutex.Lock() + defer fake.triggerIfNeededMutex.Unlock() + fake.TriggerIfNeededStub = stub +} + +func (fake *RestartManager) TriggerIfNeededArgsForCall(i int) restart.Instance { + fake.triggerIfNeededMutex.RLock() + defer fake.triggerIfNeededMutex.RUnlock() + argsForCall := fake.triggerIfNeededArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *RestartManager) TriggerIfNeededReturns(result1 error) { + fake.triggerIfNeededMutex.Lock() + defer fake.triggerIfNeededMutex.Unlock() + fake.TriggerIfNeededStub = nil + fake.triggerIfNeededReturns = struct { + result1 error + }{result1} +} + +func (fake *RestartManager) TriggerIfNeededReturnsOnCall(i int, result1 error) { + fake.triggerIfNeededMutex.Lock() + defer fake.triggerIfNeededMutex.Unlock() + fake.TriggerIfNeededStub = nil + if fake.triggerIfNeededReturnsOnCall == nil { + fake.triggerIfNeededReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.triggerIfNeededReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *RestartManager) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.forConfigMapUpdateMutex.RLock() + defer fake.forConfigMapUpdateMutex.RUnlock() + fake.forRestartActionMutex.RLock() + defer fake.forRestartActionMutex.RUnlock() + fake.triggerIfNeededMutex.RLock() + defer fake.triggerIfNeededMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *RestartManager) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ baseconsole.RestartManager = new(RestartManager) diff --git a/pkg/offering/base/console/mocks/update.go b/pkg/offering/base/console/mocks/update.go new file mode 100644 index 00000000..fcac5c0c --- /dev/null +++ b/pkg/offering/base/console/mocks/update.go @@ -0,0 +1,362 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package mocks + +import ( + "sync" + + baseconsole "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/console" +) + +type Update struct { + ConsoleCMUpdatedStub func() bool + consoleCMUpdatedMutex sync.RWMutex + consoleCMUpdatedArgsForCall []struct { + } + consoleCMUpdatedReturns struct { + result1 bool + } + consoleCMUpdatedReturnsOnCall map[int]struct { + result1 bool + } + DeployerCMUpdatedStub func() bool + deployerCMUpdatedMutex sync.RWMutex + deployerCMUpdatedArgsForCall []struct { + } + deployerCMUpdatedReturns struct { + result1 bool + } + deployerCMUpdatedReturnsOnCall map[int]struct { + result1 bool + } + EnvCMUpdatedStub func() bool + envCMUpdatedMutex sync.RWMutex + envCMUpdatedArgsForCall []struct { + } + envCMUpdatedReturns struct { + result1 bool + } + envCMUpdatedReturnsOnCall map[int]struct { + result1 bool + } + RestartNeededStub func() bool + restartNeededMutex sync.RWMutex + restartNeededArgsForCall []struct { + } + restartNeededReturns struct { + result1 bool + } + restartNeededReturnsOnCall map[int]struct { + result1 bool + } + SpecUpdatedStub func() bool + specUpdatedMutex sync.RWMutex + specUpdatedArgsForCall []struct { + } + specUpdatedReturns struct { + result1 bool + } + specUpdatedReturnsOnCall map[int]struct { + result1 bool + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *Update) ConsoleCMUpdated() bool { + fake.consoleCMUpdatedMutex.Lock() + ret, specificReturn := fake.consoleCMUpdatedReturnsOnCall[len(fake.consoleCMUpdatedArgsForCall)] + fake.consoleCMUpdatedArgsForCall = append(fake.consoleCMUpdatedArgsForCall, struct { + }{}) + stub := fake.ConsoleCMUpdatedStub + fakeReturns := fake.consoleCMUpdatedReturns + fake.recordInvocation("ConsoleCMUpdated", []interface{}{}) + fake.consoleCMUpdatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) ConsoleCMUpdatedCallCount() int { + fake.consoleCMUpdatedMutex.RLock() + defer fake.consoleCMUpdatedMutex.RUnlock() + return len(fake.consoleCMUpdatedArgsForCall) +} + +func (fake *Update) ConsoleCMUpdatedCalls(stub func() bool) { + fake.consoleCMUpdatedMutex.Lock() + defer fake.consoleCMUpdatedMutex.Unlock() + fake.ConsoleCMUpdatedStub = stub +} + +func (fake *Update) ConsoleCMUpdatedReturns(result1 bool) { + fake.consoleCMUpdatedMutex.Lock() + defer fake.consoleCMUpdatedMutex.Unlock() + fake.ConsoleCMUpdatedStub = nil + fake.consoleCMUpdatedReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) ConsoleCMUpdatedReturnsOnCall(i int, result1 bool) { + fake.consoleCMUpdatedMutex.Lock() + defer fake.consoleCMUpdatedMutex.Unlock() + fake.ConsoleCMUpdatedStub = nil + if fake.consoleCMUpdatedReturnsOnCall == nil { + fake.consoleCMUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.consoleCMUpdatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) DeployerCMUpdated() bool { + fake.deployerCMUpdatedMutex.Lock() + ret, specificReturn := fake.deployerCMUpdatedReturnsOnCall[len(fake.deployerCMUpdatedArgsForCall)] + fake.deployerCMUpdatedArgsForCall = append(fake.deployerCMUpdatedArgsForCall, struct { + }{}) + stub := fake.DeployerCMUpdatedStub + fakeReturns := fake.deployerCMUpdatedReturns + fake.recordInvocation("DeployerCMUpdated", []interface{}{}) + fake.deployerCMUpdatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 
+} + +func (fake *Update) DeployerCMUpdatedCallCount() int { + fake.deployerCMUpdatedMutex.RLock() + defer fake.deployerCMUpdatedMutex.RUnlock() + return len(fake.deployerCMUpdatedArgsForCall) +} + +func (fake *Update) DeployerCMUpdatedCalls(stub func() bool) { + fake.deployerCMUpdatedMutex.Lock() + defer fake.deployerCMUpdatedMutex.Unlock() + fake.DeployerCMUpdatedStub = stub +} + +func (fake *Update) DeployerCMUpdatedReturns(result1 bool) { + fake.deployerCMUpdatedMutex.Lock() + defer fake.deployerCMUpdatedMutex.Unlock() + fake.DeployerCMUpdatedStub = nil + fake.deployerCMUpdatedReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) DeployerCMUpdatedReturnsOnCall(i int, result1 bool) { + fake.deployerCMUpdatedMutex.Lock() + defer fake.deployerCMUpdatedMutex.Unlock() + fake.DeployerCMUpdatedStub = nil + if fake.deployerCMUpdatedReturnsOnCall == nil { + fake.deployerCMUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.deployerCMUpdatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) EnvCMUpdated() bool { + fake.envCMUpdatedMutex.Lock() + ret, specificReturn := fake.envCMUpdatedReturnsOnCall[len(fake.envCMUpdatedArgsForCall)] + fake.envCMUpdatedArgsForCall = append(fake.envCMUpdatedArgsForCall, struct { + }{}) + stub := fake.EnvCMUpdatedStub + fakeReturns := fake.envCMUpdatedReturns + fake.recordInvocation("EnvCMUpdated", []interface{}{}) + fake.envCMUpdatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) EnvCMUpdatedCallCount() int { + fake.envCMUpdatedMutex.RLock() + defer fake.envCMUpdatedMutex.RUnlock() + return len(fake.envCMUpdatedArgsForCall) +} + +func (fake *Update) EnvCMUpdatedCalls(stub func() bool) { + fake.envCMUpdatedMutex.Lock() + defer fake.envCMUpdatedMutex.Unlock() + fake.EnvCMUpdatedStub = stub +} + +func (fake *Update) EnvCMUpdatedReturns(result1 bool) { + fake.envCMUpdatedMutex.Lock() + defer fake.envCMUpdatedMutex.Unlock() + fake.EnvCMUpdatedStub = nil + fake.envCMUpdatedReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) EnvCMUpdatedReturnsOnCall(i int, result1 bool) { + fake.envCMUpdatedMutex.Lock() + defer fake.envCMUpdatedMutex.Unlock() + fake.EnvCMUpdatedStub = nil + if fake.envCMUpdatedReturnsOnCall == nil { + fake.envCMUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.envCMUpdatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) RestartNeeded() bool { + fake.restartNeededMutex.Lock() + ret, specificReturn := fake.restartNeededReturnsOnCall[len(fake.restartNeededArgsForCall)] + fake.restartNeededArgsForCall = append(fake.restartNeededArgsForCall, struct { + }{}) + stub := fake.RestartNeededStub + fakeReturns := fake.restartNeededReturns + fake.recordInvocation("RestartNeeded", []interface{}{}) + fake.restartNeededMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) RestartNeededCallCount() int { + fake.restartNeededMutex.RLock() + defer fake.restartNeededMutex.RUnlock() + return len(fake.restartNeededArgsForCall) +} + +func (fake *Update) RestartNeededCalls(stub func() bool) { + fake.restartNeededMutex.Lock() + defer fake.restartNeededMutex.Unlock() + fake.RestartNeededStub = stub +} + +func (fake *Update) RestartNeededReturns(result1 bool) { + fake.restartNeededMutex.Lock() + defer 
fake.restartNeededMutex.Unlock() + fake.RestartNeededStub = nil + fake.restartNeededReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) RestartNeededReturnsOnCall(i int, result1 bool) { + fake.restartNeededMutex.Lock() + defer fake.restartNeededMutex.Unlock() + fake.RestartNeededStub = nil + if fake.restartNeededReturnsOnCall == nil { + fake.restartNeededReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.restartNeededReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) SpecUpdated() bool { + fake.specUpdatedMutex.Lock() + ret, specificReturn := fake.specUpdatedReturnsOnCall[len(fake.specUpdatedArgsForCall)] + fake.specUpdatedArgsForCall = append(fake.specUpdatedArgsForCall, struct { + }{}) + stub := fake.SpecUpdatedStub + fakeReturns := fake.specUpdatedReturns + fake.recordInvocation("SpecUpdated", []interface{}{}) + fake.specUpdatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) SpecUpdatedCallCount() int { + fake.specUpdatedMutex.RLock() + defer fake.specUpdatedMutex.RUnlock() + return len(fake.specUpdatedArgsForCall) +} + +func (fake *Update) SpecUpdatedCalls(stub func() bool) { + fake.specUpdatedMutex.Lock() + defer fake.specUpdatedMutex.Unlock() + fake.SpecUpdatedStub = stub +} + +func (fake *Update) SpecUpdatedReturns(result1 bool) { + fake.specUpdatedMutex.Lock() + defer fake.specUpdatedMutex.Unlock() + fake.SpecUpdatedStub = nil + fake.specUpdatedReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) SpecUpdatedReturnsOnCall(i int, result1 bool) { + fake.specUpdatedMutex.Lock() + defer fake.specUpdatedMutex.Unlock() + fake.SpecUpdatedStub = nil + if fake.specUpdatedReturnsOnCall == nil { + fake.specUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.specUpdatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.consoleCMUpdatedMutex.RLock() + defer fake.consoleCMUpdatedMutex.RUnlock() + fake.deployerCMUpdatedMutex.RLock() + defer fake.deployerCMUpdatedMutex.RUnlock() + fake.envCMUpdatedMutex.RLock() + defer fake.envCMUpdatedMutex.RUnlock() + fake.restartNeededMutex.RLock() + defer fake.restartNeededMutex.RUnlock() + fake.specUpdatedMutex.RLock() + defer fake.specUpdatedMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *Update) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ baseconsole.Update = new(Update) diff --git a/pkg/offering/base/console/override/consolecm.go b/pkg/offering/base/console/override/consolecm.go new file mode 100644 index 00000000..e35bf568 --- /dev/null +++ b/pkg/offering/base/console/override/consolecm.go @@ -0,0 +1,206 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except 
in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override + +import ( + "errors" + "fmt" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + + consolev1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/console/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/yaml" +) + +func (o *Override) ConsoleCM(object v1.Object, cm *corev1.ConfigMap, action resources.Action, options map[string]interface{}) error { + instance := object.(*current.IBPConsole) + switch action { + case resources.Create: + return o.CreateConsoleCM(instance, cm, options) + case resources.Update: + return o.UpdateConsoleCM(instance, cm, options) + } + + return nil +} + +func (o *Override) CreateConsoleCM(instance *current.IBPConsole, cm *corev1.ConfigMap, options map[string]interface{}) error { + return errors.New("no create console cm defined, this needs to implemented") +} + +func (o *Override) UpdateConsoleCM(instance *current.IBPConsole, cm *corev1.ConfigMap, options map[string]interface{}) error { + data := cm.Data["settings.yaml"] + + config := &consolev1.ConsoleSettingsConfig{} + err := yaml.Unmarshal([]byte(data), config) + if err != nil { + return err + } + + err = CommonConsoleCM(instance, config, options) + if err != nil { + return err + } + + bytes, err := yaml.Marshal(config) + if err != nil { + return err + } + + if cm.Data == nil { + cm.Data = map[string]string{} + } + + cm.Data["settings.yaml"] = string(bytes) + return nil +} + +func CommonConsoleCM(instance *current.IBPConsole, config *consolev1.ConsoleSettingsConfig, options map[string]interface{}) error { + config.DynamicConfig = true + config.IBMID = instance.Spec.IBMID + config.IAMApiKey = instance.Spec.IAMApiKey + config.SegmentWriteKey = instance.Spec.SegmentWriteKey + config.TrustProxy = "loopback, linklocal, uniquelocal" + + if instance.Spec.Email != "" { + config.Email = instance.Spec.Email + } + + if instance.Spec.AuthScheme != "" { + config.AuthScheme = instance.Spec.AuthScheme + } + + if instance.Spec.AllowDefaultPassword { + config.AllowDefaultPassword = true + } + + if instance.Spec.ConfigtxlatorURL != "" { + config.Configtxlator = instance.Spec.ConfigtxlatorURL + } + + if instance.Spec.DeployerURL != "" { + config.DeployerURL = instance.Spec.DeployerURL + } + + if instance.Spec.DeployerTimeout != 0 { + config.DeployerTimeout = instance.Spec.DeployerTimeout + } + + if instance.Spec.Components != "" { + config.DBCustomNames.Components = instance.Spec.Components + } + + if instance.Spec.Sessions != "" { + config.DBCustomNames.Sessions = instance.Spec.Sessions + } + + if instance.Spec.System != "" { + config.DBCustomNames.System = instance.Spec.System + } + + if instance.Spec.SystemChannel != "" { + config.SystemChannelID = instance.Spec.SystemChannel + } + + // ensures a default value + if instance.Spec.Proxying == nil { + t := true + instance.Spec.Proxying = &t + } + + if *instance.Spec.Proxying { + config.ProxyTLSReqs = "always" + } + + if instance.Spec.FeatureFlags != nil { + config.Featureflags = 
instance.Spec.FeatureFlags + } else { + config.Featureflags = &consolev1.FeatureFlags{ + ReadOnlyEnabled: new(bool), + ImportOnlyEnabled: new(bool), + CreateChannelEnabled: true, + RemotePeerConfigEnabled: true, + TemplatesEnabled: false, + CapabilitiesEnabled: true, + HighAvailability: true, + EnableNodeOU: true, + HSMEnabled: true, + ScaleRaftNodesEnabled: true, + Lifecycle20Enabled: true, + Patch14to20Enabled: true, + MustgatherEnabled: true, + InfraImportOptions: &consolev1.InfraImportOptions{ + SupportedCAs: []string{OPENSHIFT, K8S}, + SupportedOrderers: []string{OPENSHIFT, K8S}, + SupportedPeers: []string{OPENSHIFT, K8S}, + }, + } + } + + if instance.Spec.ClusterData != nil { + config.ClusterData = instance.Spec.ClusterData + } else { + config.ClusterData = &consolev1.IBPConsoleClusterData{} + } + + if config.ClusterData.Type == "" { + config.ClusterData.Type = "paid" + } + + crn := instance.Spec.CRN + if crn != nil { + config.CRN = &consolev1.CRN{ + Version: crn.Version, + CName: crn.CName, + CType: crn.CType, + Servicename: crn.Servicename, + Location: crn.Location, + AccountID: crn.AccountID, + InstanceID: crn.InstanceID, + ResourceType: crn.ResourceType, + ResourceID: crn.ResourceID, + } + config.CRNString = fmt.Sprintf("crn:%s:%s:%s:%s:%s:%s:%s:%s:%s", + crn.Version, crn.CName, crn.CType, crn.Servicename, crn.Location, crn.AccountID, crn.InstanceID, crn.ResourceType, crn.ResourceID) + } + + consoleOverrides, err := instance.Spec.GetOverridesConsole() + if err != nil { + return err + } + + if consoleOverrides.ActivityTrackerConsolePath != "" { + config.ActivityTrackerPath = consoleOverrides.ActivityTrackerConsolePath + } + + // This field is to indicate if the new way of setting up HSM + // with init sidecar is enabled + if consoleOverrides.HSM != "" && consoleOverrides.HSM != "false" { + config.HSM = "true" + } + + if options != nil && options["username"] != nil && options["password"] != nil { + config.DeployerURL = fmt.Sprintf("http://%s:%s@localhost:8080", options["username"].(string), options["password"].(string)) + } + + return nil +} diff --git a/pkg/offering/base/console/override/consolecm_test.go b/pkg/offering/base/console/override/consolecm_test.go new file mode 100644 index 00000000..e3779dea --- /dev/null +++ b/pkg/offering/base/console/override/consolecm_test.go @@ -0,0 +1,185 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/yaml" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + consolev1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/console/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/console/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" +) + +var _ = Describe("Base Console Config Map Overrides", func() { + var ( + overrider *override.Override + instance *current.IBPConsole + cm *corev1.ConfigMap + ) + + BeforeEach(func() { + var err error + overrider = &override.Override{} + instance = ¤t.IBPConsole{ + Spec: current.IBPConsoleSpec{ + Email: "test@ibm.com", + AuthScheme: "scheme1", + AllowDefaultPassword: true, + ConfigtxlatorURL: "configtx.ibm.com", + DeployerURL: "deployer.ibm.com", + DeployerTimeout: 5, + Components: "component1", + Sessions: "session1", + System: "system1", + SystemChannel: "channel1", + FeatureFlags: &consolev1.FeatureFlags{ + CreateChannelEnabled: true, + }, + ClusterData: &consolev1.IBPConsoleClusterData{ + Zones: []string{"zone1"}, + Type: "type1", + }, + }, + } + cm, err = util.GetConfigMapFromFile("../../../../../definitions/console/console-configmap.yaml") + Expect(err).NotTo(HaveOccurred()) + }) + + Context("create", func() { + It("returns an error if base create function called", func() { + err := overrider.ConsoleCM(instance, cm, resources.Create, nil) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("no create console cm defined, this needs to implemented")) + }) + }) + + Context("update", func() { + It("overrides values based on spec", func() { + err := overrider.ConsoleCM(instance, cm, resources.Update, nil) + Expect(err).NotTo(HaveOccurred()) + + config := &consolev1.ConsoleSettingsConfig{} + err = yaml.Unmarshal([]byte(cm.Data["settings.yaml"]), config) + Expect(err).NotTo(HaveOccurred()) + + By("setting email", func() { + Expect(config.Email).To(Equal(instance.Spec.Email)) + }) + + By("setting auth scheme", func() { + Expect(config.AuthScheme).To(Equal(instance.Spec.AuthScheme)) + }) + + By("setting allow_default_password", func() { + Expect(config.AllowDefaultPassword).To(Equal(instance.Spec.AllowDefaultPassword)) + }) + + By("setting configtxlator URL", func() { + Expect(config.Configtxlator).To(Equal(instance.Spec.ConfigtxlatorURL)) + }) + + By("setting Deployer URL", func() { + Expect(config.DeployerURL).To(Equal(instance.Spec.DeployerURL)) + }) + + By("setting Deployer timeout", func() { + Expect(config.DeployerTimeout).To(Equal(instance.Spec.DeployerTimeout)) + }) + + By("setting components", func() { + Expect(config.DBCustomNames.Components).To(Equal(instance.Spec.Components)) + }) + + By("setting sessions", func() { + Expect(config.DBCustomNames.Sessions).To(Equal(instance.Spec.Sessions)) + }) + + By("setting system", func() { + Expect(config.DBCustomNames.System).To(Equal(instance.Spec.System)) + }) + + By("setting system channel", func() { + Expect(config.SystemChannelID).To(Equal(instance.Spec.SystemChannel)) + }) + + By("setting Proxy TLS Reqs", func() { + Expect(config.ProxyTLSReqs).To(Equal("always")) + }) + + By("settings feature flags", func() { + Expect(config.Featureflags).To(Equal(instance.Spec.FeatureFlags)) + }) + + By("settings cluster data", func() { + Expect(config.ClusterData).To(Equal(instance.Spec.ClusterData)) + }) + + By("setting trust proxy", func() { + Expect(config.TrustProxy).To(Equal("loopback, linklocal, uniquelocal")) + 
}) + }) + + It("overrides values based on defaults", func() { + instance = ¤t.IBPConsole{} + err := overrider.ConsoleCM(instance, cm, resources.Update, nil) + Expect(err).NotTo(HaveOccurred()) + + config := &consolev1.ConsoleSettingsConfig{} + err = yaml.Unmarshal([]byte(cm.Data["settings.yaml"]), config) + Expect(err).NotTo(HaveOccurred()) + + By("settings feature flags", func() { + ff := &consolev1.FeatureFlags{ + ReadOnlyEnabled: new(bool), + ImportOnlyEnabled: new(bool), + CreateChannelEnabled: true, + RemotePeerConfigEnabled: true, + TemplatesEnabled: false, + CapabilitiesEnabled: true, + HighAvailability: true, + EnableNodeOU: true, + HSMEnabled: true, + ScaleRaftNodesEnabled: true, + Lifecycle20Enabled: true, + Patch14to20Enabled: true, + MustgatherEnabled: true, + InfraImportOptions: &consolev1.InfraImportOptions{ + SupportedCAs: []string{override.OPENSHIFT, override.K8S}, + SupportedOrderers: []string{override.OPENSHIFT, override.K8S}, + SupportedPeers: []string{override.OPENSHIFT, override.K8S}, + }, + } + + Expect(config.Featureflags).To(Equal(ff)) + }) + + By("settings cluster data", func() { + cd := &consolev1.IBPConsoleClusterData{ + Type: "paid", + } + Expect(config.ClusterData).To(Equal(cd)) + }) + }) + }) +}) diff --git a/pkg/offering/base/console/override/deployercm.go b/pkg/offering/base/console/override/deployercm.go new file mode 100644 index 00000000..cf316150 --- /dev/null +++ b/pkg/offering/base/console/override/deployercm.go @@ -0,0 +1,197 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package override + +import ( + "encoding/json" + "errors" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/defaultconfig/console" + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/deployer" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/util/image" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/yaml" +) + +func (o *Override) DeployerCM(object v1.Object, cm *corev1.ConfigMap, action resources.Action, options map[string]interface{}) error { + instance := object.(*current.IBPConsole) + switch action { + case resources.Create: + return o.CreateDeployerCM(instance, cm, options) + case resources.Update: + return o.UpdateDeployerCM(instance, cm, options) + } + + return nil +} + +func (o *Override) CreateDeployerCM(instance *current.IBPConsole, cm *corev1.ConfigMap, options map[string]interface{}) error { + return errors.New("no create deployer cm defined, this needs to implemented") +} + +func (o *Override) UpdateDeployerCM(instance *current.IBPConsole, cm *corev1.ConfigMap, options map[string]interface{}) error { + data := cm.Data["settings.yaml"] + + config := &deployer.Config{} + err := yaml.Unmarshal([]byte(data), config) + if err != nil { + return err + } + + err = CommonDeployerCM(instance, config, options) + if err != nil { + return err + } + + bytes, err := yaml.Marshal(config) + if err != nil { + return err + } + + if cm.Data == nil { + cm.Data = map[string]string{} + } + + cm.Data["settings.yaml"] = string(bytes) + + return nil +} + +func CommonDeployerCM(instance *current.IBPConsole, config *deployer.Config, options map[string]interface{}) error { + if len(instance.Spec.ImagePullSecrets) == 0 { + return errors.New("no image pull secret provided") + } + + if instance.Spec.NetworkInfo == nil || instance.Spec.NetworkInfo.Domain == "" { + return errors.New("no domain provided") + } + + config.ImagePullSecrets = instance.Spec.ImagePullSecrets + config.Domain = instance.Spec.NetworkInfo.Domain + + if instance.Spec.Deployer != nil { + if instance.Spec.Deployer.CreateDB { + config.Database.CreateDB = instance.Spec.Deployer.CreateDB + } + if instance.Spec.Deployer.ComponentsDB != "" { + config.Database.Components.Name = instance.Spec.Deployer.ComponentsDB + } + if instance.Spec.Deployer.ConnectionString != "" { + config.Database.ConnectionURL = instance.Spec.Deployer.ConnectionString + } + } + + registryURL := instance.Spec.RegistryURL + arch := "amd64" + if instance.Spec.Arch != nil && len(instance.Spec.Arch) > 0 { + arch = instance.Spec.Arch[0] + } + + if instance.Spec.UseTags != nil && instance.Spec.UseTags != config.UseTags { + config.UseTags = instance.Spec.UseTags + } + + requestedVersions := &deployer.Versions{} + if instance.Spec.Versions != nil { + // convert spec version to deployer config versions + instanceVersionBytes, err := json.Marshal(instance.Spec.Versions) + if err != nil { + return err + } + err = json.Unmarshal(instanceVersionBytes, requestedVersions) + if err != nil { + return err + } + } else { + // use default config versions + requestedVersions = config.Versions + } + config.Versions.Override(requestedVersions, registryURL, arch) + + images := instance.Spec.Images + if images == nil { + images = ¤t.ConsoleImages{} + } + defaultimage := console.GetImages() + + // TODO:OSS what happens if defaultimage is empty + mustgatherImage := image.GetImage(registryURL, defaultimage.MustgatherImage, 
images.MustgatherImage) + mustgatherTag := image.GetTag(arch, defaultimage.MustgatherTag, images.MustgatherTag) + + config.OtherImages = &deployer.OtherImages{ + MustgatherImage: mustgatherImage, + MustgatherTag: mustgatherTag, + } + + config.ServiceAccount = instance.GetName() + + storageClassName := "" + if instance.Spec.Storage != nil && instance.Spec.Storage.Console != nil { + storageClassName = instance.Spec.Storage.Console.Class + } + + config.Defaults.Storage.CA.CA.Class = storageClassName + config.Defaults.Storage.Peer.Peer.Class = storageClassName + config.Defaults.Storage.Peer.StateDB.Class = storageClassName + config.Defaults.Storage.Orderer.Orderer.Class = storageClassName + + crn := instance.Spec.CRN + if crn != nil { + config.CRN = ¤t.CRN{ + Version: crn.Version, + CName: crn.CName, + CType: crn.CType, + Servicename: crn.Servicename, + Location: crn.Location, + AccountID: crn.AccountID, + InstanceID: crn.InstanceID, + ResourceType: crn.ResourceType, + ResourceID: crn.ResourceID, + } + } + + // used for passing separate domains for optools and deployer + if instance.Spec.Deployer != nil && instance.Spec.Deployer.Domain != "" { + config.Domain = instance.Spec.Deployer.Domain + } + + deployerOverrides, err := instance.Spec.GetOverridesDeployer() + if err != nil { + return err + } + if deployerOverrides != nil && deployerOverrides.Timeouts != nil { + config.Timeouts = &deployer.Timeouts{} + if deployerOverrides.Timeouts.APIServer != 0 { + config.Timeouts.APIServer = deployerOverrides.Timeouts.APIServer + } + if deployerOverrides.Timeouts.Deployment != 0 { + config.Timeouts.Deployment = deployerOverrides.Timeouts.Deployment + } + } + + if options != nil && options["username"] != nil && options["password"] != nil { + config.Auth.Username = options["username"].(string) + config.Auth.Password = options["password"].(string) + } + + return nil +} diff --git a/pkg/offering/base/console/override/deployercm_test.go b/pkg/offering/base/console/override/deployercm_test.go new file mode 100644 index 00000000..bf308909 --- /dev/null +++ b/pkg/offering/base/console/override/deployercm_test.go @@ -0,0 +1,746 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/yaml" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/deployer" + + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/console/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" +) + +var _ = Describe("Base Console Deployer Config Map Overrides", func() { + var ( + overrider *override.Override + instance *current.IBPConsole + cm *corev1.ConfigMap + ) + + BeforeEach(func() { + var err error + overrider = &override.Override{} + instance = ¤t.IBPConsole{ + Spec: current.IBPConsoleSpec{ + ImagePullSecrets: []string{"pullsecret"}, + ConnectionString: "connectionString1", + Storage: ¤t.ConsoleStorage{ + Console: ¤t.StorageSpec{ + Class: "sc1", + }, + }, + NetworkInfo: ¤t.NetworkInfo{ + Domain: "domain1", + }, + Versions: ¤t.Versions{ + CA: map[string]current.VersionCA{ + "v1-0": current.VersionCA{ + Default: true, + Version: "v1-0", + Image: current.CAImages{ + CAInitImage: "ca-init-image", + CAInitTag: "1.0.0", + CAImage: "ca-image", + CATag: "1.0.0", + }, + }, + "v2-0": current.VersionCA{ + Default: false, + Version: "v2-0", + Image: current.CAImages{ + CAInitImage: "ca-init-image", + CAInitTag: "2.0.0", + CAImage: "ca-image", + CATag: "2.0.0", + }, + }, + }, + Peer: map[string]current.VersionPeer{ + "v1-0": current.VersionPeer{ + Default: true, + Version: "v1-0", + Image: current.PeerImages{ + PeerInitImage: "peer-init-image", + PeerInitTag: "1.0.0", + PeerImage: "peer-image", + PeerTag: "1.0.0", + DindImage: "dind-iamge", + DindTag: "1.0.0", + GRPCWebImage: "grpcweb-image", + GRPCWebTag: "1.0.0", + FluentdImage: "fluentd-image", + FluentdTag: "1.0.0", + CouchDBImage: "couchdb-image", + CouchDBTag: "1.0.0", + }, + }, + "v2-0": current.VersionPeer{ + Default: false, + Version: "v2-0", + Image: current.PeerImages{ + PeerInitImage: "peer-init-image", + PeerInitTag: "2.0.0", + PeerImage: "peer-image", + PeerTag: "2.0.0", + DindImage: "dind-iamge", + DindTag: "2.0.0", + GRPCWebImage: "grpcweb-image", + GRPCWebTag: "2.0.0", + FluentdImage: "fluentd-image", + FluentdTag: "2.0.0", + CouchDBImage: "couchdb-image", + CouchDBTag: "2.0.0", + CCLauncherImage: "cclauncher-image", + CCLauncherTag: "2.0.0", + }, + }, + }, + Orderer: map[string]current.VersionOrderer{ + "v1-0": current.VersionOrderer{ + Default: true, + Version: "v1-0", + Image: current.OrdererImages{ + OrdererInitImage: "orderer-init-image", + OrdererInitTag: "1.0.0", + OrdererImage: "orderer-image", + OrdererTag: "1.0.0", + GRPCWebImage: "grpcweb-image", + GRPCWebTag: "1.0.0", + }, + }, + "v2-0": current.VersionOrderer{ + Default: false, + Version: "v2-0", + Image: current.OrdererImages{ + OrdererInitImage: "orderer-init-image", + OrdererInitTag: "2.0.0", + OrdererImage: "orderer-image", + OrdererTag: "2.0.0", + GRPCWebImage: "grpcweb-image", + GRPCWebTag: "2.0.0", + }, + }, + }, + }, + CRN: ¤t.CRN{ + CName: "cname", + CType: "ctype", + Location: "location1", + Servicename: "Servicename1", + Version: "version1", + AccountID: "id123", + }, + Deployer: ¤t.Deployer{ + ConnectionString: "connectionstring2", + }, + Images: ¤t.ConsoleImages{ + MustgatherImage: "test-image", + MustgatherTag: "test-tag", + }, + }, + } + cm, err = util.GetConfigMapFromFile("../../../../../testdata/deployercm/deployer-configmap.yaml") + Expect(err).NotTo(HaveOccurred()) + }) + + Context("create", func() { + It("returns an error if base 
create function called", func() { + err := overrider.DeployerCM(instance, cm, resources.Create, nil) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("no create deployer cm defined, this needs to implemented")) + }) + }) + + Context("update", func() { + It("return an error if no image pull secret provided", func() { + instance.Spec.ImagePullSecrets = []string{} + err := overrider.DeployerCM(instance, cm, resources.Update, nil) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("no image pull secret provided")) + }) + + It("return an error if no domain provided", func() { + instance.Spec.NetworkInfo.Domain = "" + err := overrider.DeployerCM(instance, cm, resources.Update, nil) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("no domain provided")) + }) + + It("overrides values based on spec", func() { + err := overrider.DeployerCM(instance, cm, resources.Update, nil) + Expect(err).NotTo(HaveOccurred()) + + config := &deployer.Config{} + + err = yaml.Unmarshal([]byte(cm.Data["settings.yaml"]), config) + Expect(err).NotTo(HaveOccurred()) + + By("setting image pull secret", func() { + Expect(config.ImagePullSecrets).To(Equal(instance.Spec.ImagePullSecrets)) + }) + + By("setting connection string", func() { + Expect(config.Database.ConnectionURL).To(Equal(instance.Spec.Deployer.ConnectionString)) + }) + + By("setting versions", func() { + expectedVersions := ¤t.Versions{ + CA: map[string]current.VersionCA{ + "v1-0": current.VersionCA{ + Default: true, + Version: "v1-0", + Image: current.CAImages{ + CAInitImage: "ca-init-image", + CAInitTag: "1.0.0", + CAImage: "ca-image", + CATag: "1.0.0", + }, + }, + "v2-0": current.VersionCA{ + Default: false, + Version: "v2-0", + Image: current.CAImages{ + CAInitImage: "ca-init-image", + CAInitTag: "2.0.0", + CAImage: "ca-image", + CATag: "2.0.0", + }, + }, + }, + Peer: map[string]current.VersionPeer{ + "v1-0": current.VersionPeer{ + Default: true, + Version: "v1-0", + Image: current.PeerImages{ + PeerInitImage: "peer-init-image", + PeerInitTag: "1.0.0", + PeerImage: "peer-image", + PeerTag: "1.0.0", + DindImage: "dind-iamge", + DindTag: "1.0.0", + GRPCWebImage: "grpcweb-image", + GRPCWebTag: "1.0.0", + FluentdImage: "fluentd-image", + FluentdTag: "1.0.0", + CouchDBImage: "couchdb-image", + CouchDBTag: "1.0.0", + }, + }, + "v2-0": current.VersionPeer{ + Default: false, + Version: "v2-0", + Image: current.PeerImages{ + PeerInitImage: "peer-init-image", + PeerInitTag: "2.0.0", + PeerImage: "peer-image", + PeerTag: "2.0.0", + DindImage: "dind-iamge", + DindTag: "2.0.0", + GRPCWebImage: "grpcweb-image", + GRPCWebTag: "2.0.0", + FluentdImage: "fluentd-image", + FluentdTag: "2.0.0", + CouchDBImage: "couchdb-image", + CouchDBTag: "2.0.0", + CCLauncherImage: "cclauncher-image", + CCLauncherTag: "2.0.0", + }, + }, + }, + Orderer: map[string]current.VersionOrderer{ + "v1-0": current.VersionOrderer{ + Default: true, + Version: "v1-0", + Image: current.OrdererImages{ + OrdererInitImage: "orderer-init-image", + OrdererInitTag: "1.0.0", + OrdererImage: "orderer-image", + OrdererTag: "1.0.0", + GRPCWebImage: "grpcweb-image", + GRPCWebTag: "1.0.0", + }, + }, + "v2-0": current.VersionOrderer{ + Default: false, + Version: "v2-0", + Image: current.OrdererImages{ + OrdererInitImage: "orderer-init-image", + OrdererInitTag: "2.0.0", + OrdererImage: "orderer-image", + OrdererTag: "2.0.0", + GRPCWebImage: "grpcweb-image", + GRPCWebTag: "2.0.0", + }, + }, + }, + } + + typeConvertedVersions := ¤t.Versions{} + 
util.ConvertSpec(config.Versions, typeConvertedVersions) + Expect(typeConvertedVersions).To(Equal(expectedVersions)) + }) + + By("setting storage class name", func() { + Expect(config.Defaults.Storage.CA.CA.Class).To(Equal(instance.Spec.Storage.Console.Class)) + Expect(config.Defaults.Storage.Peer.Peer.Class).To(Equal(instance.Spec.Storage.Console.Class)) + Expect(config.Defaults.Storage.Peer.StateDB.Class).To(Equal(instance.Spec.Storage.Console.Class)) + Expect(config.Defaults.Storage.Orderer.Orderer.Class).To(Equal(instance.Spec.Storage.Console.Class)) + }) + + By("setting CRN", func() { + crn := ¤t.CRN{ + CName: instance.Spec.CRN.CName, + CType: instance.Spec.CRN.CType, + Location: instance.Spec.CRN.Location, + Servicename: instance.Spec.CRN.Servicename, + Version: instance.Spec.CRN.Version, + AccountID: instance.Spec.CRN.AccountID, + } + Expect(config.CRN).To(Equal(crn)) + }) + + By("setting domain", func() { + Expect(config.Domain).To(Equal(instance.Spec.NetworkInfo.Domain)) + }) + + By("setting must gather images", func() { + Expect(config.OtherImages.MustgatherImage).To(Equal("test-image")) + Expect(config.OtherImages.MustgatherTag).To(Equal("test-tag")) + }) + }) + + It("should get default versions if overrides are not passed", func() { + instance.Spec.Versions = nil + err := overrider.DeployerCM(instance, cm, resources.Update, nil) + Expect(err).NotTo(HaveOccurred()) + + config := &deployer.Config{} + + err = yaml.Unmarshal([]byte(cm.Data["settings.yaml"]), config) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("common overrides", func() { + Context("version overrides", func() { + When("registry url is not set", func() { + BeforeEach(func() { + instance = ¤t.IBPConsole{ + Spec: current.IBPConsoleSpec{ + ImagePullSecrets: []string{"pullsecret"}, + NetworkInfo: ¤t.NetworkInfo{ + Domain: "domain1", + }, + Versions: ¤t.Versions{ + CA: map[string]current.VersionCA{ + "v1-0": current.VersionCA{ + Image: current.CAImages{ + CAInitImage: "ghcr.io/ibm-blockchain/ca-init-image", + CAInitTag: "1.0.0", + CAImage: "ghcr.io/ibm-blockchain/ca-image", + CATag: "1.0.0", + }, + }, + }, + Peer: map[string]current.VersionPeer{ + "v1-0": current.VersionPeer{ + Image: current.PeerImages{ + PeerInitImage: "ghcr.io/ibm-blockchain/peer-init-image", + PeerInitTag: "1.0.0", + PeerImage: "ghcr.io/ibm-blockchain/peer-image", + PeerTag: "1.0.0", + DindImage: "ghcr.io/ibm-blockchain/dind-iamge", + DindTag: "1.0.0", + GRPCWebImage: "ghcr.io/ibm-blockchain/grpcweb-image", + GRPCWebTag: "1.0.0", + FluentdImage: "ghcr.io/ibm-blockchain/fluentd-image", + FluentdTag: "1.0.0", + CouchDBImage: "ghcr.io/ibm-blockchain/couchdb-image", + CouchDBTag: "1.0.0", + }, + }, + }, + Orderer: map[string]current.VersionOrderer{ + "v1-0": current.VersionOrderer{ + Image: current.OrdererImages{ + OrdererInitImage: "ghcr.io/ibm-blockchain/orderer-init-image", + OrdererInitTag: "1.0.0", + OrdererImage: "ghcr.io/ibm-blockchain/orderer-image", + OrdererTag: "1.0.0", + GRPCWebImage: "ghcr.io/ibm-blockchain/grpcweb-image", + GRPCWebTag: "1.0.0", + }, + }, + }, + }, + }, + } + }) + + It("keeps images as passed", func() { + expectedVersions := ¤t.Versions{ + CA: map[string]current.VersionCA{ + "v1-0": current.VersionCA{ + Image: current.CAImages{ + CAInitImage: "ghcr.io/ibm-blockchain/ca-init-image", + CAInitTag: "1.0.0-amd64", + CAImage: "ghcr.io/ibm-blockchain/ca-image", + CATag: "1.0.0-amd64", + }, + }, + }, + Peer: map[string]current.VersionPeer{ + "v1-0": current.VersionPeer{ + Image: current.PeerImages{ + PeerInitImage: 
"ghcr.io/ibm-blockchain/peer-init-image", + PeerInitTag: "1.0.0-amd64", + PeerImage: "ghcr.io/ibm-blockchain/peer-image", + PeerTag: "1.0.0-amd64", + DindImage: "ghcr.io/ibm-blockchain/dind-iamge", + DindTag: "1.0.0-amd64", + GRPCWebImage: "ghcr.io/ibm-blockchain/grpcweb-image", + GRPCWebTag: "1.0.0-amd64", + FluentdImage: "ghcr.io/ibm-blockchain/fluentd-image", + FluentdTag: "1.0.0-amd64", + CouchDBImage: "ghcr.io/ibm-blockchain/couchdb-image", + CouchDBTag: "1.0.0-amd64", + }, + }, + }, + Orderer: map[string]current.VersionOrderer{ + "v1-0": current.VersionOrderer{ + Image: current.OrdererImages{ + OrdererInitImage: "ghcr.io/ibm-blockchain/orderer-init-image", + OrdererInitTag: "1.0.0-amd64", + OrdererImage: "ghcr.io/ibm-blockchain/orderer-image", + OrdererTag: "1.0.0-amd64", + GRPCWebImage: "ghcr.io/ibm-blockchain/grpcweb-image", + GRPCWebTag: "1.0.0-amd64", + }, + }, + }, + } + versions := &deployer.Versions{ + CA: map[string]deployer.VersionCA{ + "1.4": deployer.VersionCA{ + Image: deployer.CAImages{ + CAInitImage: "ca-init-image", + CAInitTag: "1.0.0", + CAInitDigest: "", + CAImage: "ca-image", + CATag: "1.0.0", + CADigest: "", + }, + }, + }, + Peer: map[string]deployer.VersionPeer{ + "1.4": deployer.VersionPeer{ + Image: deployer.PeerImages{ + PeerInitImage: "peer-init-image", + PeerInitTag: "1.0.0", + PeerInitDigest: "", + PeerImage: "peer-image", + PeerTag: "1.0.0", + PeerDigest: "", + DindImage: "dind-iamge", + DindTag: "1.0.0", + DindDigest: "", + GRPCWebImage: "grpcweb-image", + GRPCWebTag: "1.0.0", + GRPCWebDigest: "", + FluentdImage: "fluentd-image", + FluentdTag: "1.0.0", + FluentdDigest: "", + CouchDBImage: "couchdb-image", + CouchDBTag: "1.0.0", + CouchDBDigest: "", + }, + }, + }, + Orderer: map[string]deployer.VersionOrderer{ + "1.4": deployer.VersionOrderer{ + Image: deployer.OrdererImages{ + OrdererInitImage: "orderer-init-image", + OrdererInitTag: "1.0.0", + OrdererInitDigest: "", + OrdererImage: "orderer-image", + OrdererTag: "1.0.0", + OrdererDigest: "", + GRPCWebImage: "grpcweb-image", + GRPCWebTag: "1.0.0", + GRPCWebDigest: "", + }, + }, + }, + } + config := &deployer.Config{ + Versions: versions, + Defaults: &deployer.Defaults{ + Storage: &deployer.Storage{ + Peer: ¤t.PeerStorages{ + Peer: ¤t.StorageSpec{}, + StateDB: ¤t.StorageSpec{}, + }, + CA: ¤t.CAStorages{ + CA: ¤t.StorageSpec{}, + }, + Orderer: ¤t.OrdererStorages{ + Orderer: ¤t.StorageSpec{}, + }, + }, + Resources: &deployer.Resources{}, + }, + } + err := override.CommonDeployerCM(instance, config, nil) + Expect(err).NotTo(HaveOccurred()) + // verify CA images and tags + Expect(config.Versions.CA["1.4"].Image.CAImage).To(Equal(expectedVersions.CA["1.4"].Image.CAImage)) + Expect(config.Versions.CA["1.4"].Image.CATag).To(Equal(expectedVersions.CA["1.4"].Image.CATag)) + Expect(config.Versions.CA["1.4"].Image.CAInitImage).To(Equal(expectedVersions.CA["1.4"].Image.CAInitImage)) + Expect(config.Versions.CA["1.4"].Image.CAInitTag).To(Equal(expectedVersions.CA["1.4"].Image.CAInitTag)) + // verify Peer images and tags + Expect(config.Versions.Peer["1.4"].Image.PeerInitImage).To(Equal(expectedVersions.Peer["1.4"].Image.PeerInitImage)) + Expect(config.Versions.Peer["1.4"].Image.PeerInitTag).To(Equal(expectedVersions.Peer["1.4"].Image.PeerInitTag)) + Expect(config.Versions.Peer["1.4"].Image.PeerImage).To(Equal(expectedVersions.Peer["1.4"].Image.PeerImage)) + Expect(config.Versions.Peer["1.4"].Image.PeerTag).To(Equal(expectedVersions.Peer["1.4"].Image.PeerTag)) + 
Expect(config.Versions.Peer["1.4"].Image.DindImage).To(Equal(expectedVersions.Peer["1.4"].Image.DindImage)) + Expect(config.Versions.Peer["1.4"].Image.DindTag).To(Equal(expectedVersions.Peer["1.4"].Image.DindTag)) + Expect(config.Versions.Peer["1.4"].Image.FluentdImage).To(Equal(expectedVersions.Peer["1.4"].Image.FluentdImage)) + Expect(config.Versions.Peer["1.4"].Image.FluentdTag).To(Equal(expectedVersions.Peer["1.4"].Image.FluentdTag)) + Expect(config.Versions.Peer["1.4"].Image.CouchDBImage).To(Equal(expectedVersions.Peer["1.4"].Image.CouchDBImage)) + Expect(config.Versions.Peer["1.4"].Image.CouchDBTag).To(Equal(expectedVersions.Peer["1.4"].Image.CouchDBTag)) + Expect(config.Versions.Peer["1.4"].Image.GRPCWebImage).To(Equal(expectedVersions.Peer["1.4"].Image.GRPCWebImage)) + Expect(config.Versions.Peer["1.4"].Image.GRPCWebTag).To(Equal(expectedVersions.Peer["1.4"].Image.GRPCWebTag)) + // verify Orderer images and tags + Expect(config.Versions.Orderer["1.4"].Image.OrdererImage).To(Equal(expectedVersions.Orderer["1.4"].Image.OrdererImage)) + Expect(config.Versions.Orderer["1.4"].Image.OrdererTag).To(Equal(expectedVersions.Orderer["1.4"].Image.OrdererTag)) + Expect(config.Versions.Orderer["1.4"].Image.OrdererInitImage).To(Equal(expectedVersions.Orderer["1.4"].Image.OrdererInitImage)) + Expect(config.Versions.Orderer["1.4"].Image.OrdererInitTag).To(Equal(expectedVersions.Orderer["1.4"].Image.OrdererInitTag)) + Expect(config.Versions.Orderer["1.4"].Image.GRPCWebImage).To(Equal(expectedVersions.Orderer["1.4"].Image.GRPCWebImage)) + Expect(config.Versions.Orderer["1.4"].Image.GRPCWebTag).To(Equal(expectedVersions.Orderer["1.4"].Image.GRPCWebTag)) + }) + }) + + When("registry url is set", func() { + BeforeEach(func() { + instance = ¤t.IBPConsole{ + Spec: current.IBPConsoleSpec{ + ImagePullSecrets: []string{"pullsecret"}, + NetworkInfo: ¤t.NetworkInfo{ + Domain: "domain1", + }, + RegistryURL: "ghcr.io/ibm-blockchain/", + Versions: ¤t.Versions{ + CA: map[string]current.VersionCA{ + "v1-0": current.VersionCA{ + Image: current.CAImages{ + CAInitImage: "ca-init-image", + CAInitTag: "1.0.0", + CAImage: "ca-image", + CATag: "1.0.0", + }, + }, + }, + Peer: map[string]current.VersionPeer{ + "v1-0": current.VersionPeer{ + Image: current.PeerImages{ + PeerInitImage: "peer-init-image", + PeerInitTag: "1.0.0", + PeerImage: "peer-image", + PeerTag: "1.0.0", + DindImage: "dind-iamge", + DindTag: "1.0.0", + GRPCWebImage: "grpcweb-image", + GRPCWebTag: "1.0.0", + FluentdImage: "fluentd-image", + FluentdTag: "1.0.0", + CouchDBImage: "couchdb-image", + CouchDBTag: "1.0.0", + }, + }, + }, + Orderer: map[string]current.VersionOrderer{ + "v1-0": current.VersionOrderer{ + Image: current.OrdererImages{ + OrdererInitImage: "orderer-init-image", + OrdererInitTag: "1.0.0", + OrdererImage: "orderer-image", + OrdererTag: "1.0.0", + GRPCWebImage: "grpcweb-image", + GRPCWebTag: "1.0.0", + }, + }, + }, + }, + }, + } + }) + + It("prepends registry url to images", func() { + expectedVersions := ¤t.Versions{ + CA: map[string]current.VersionCA{ + "v1-0": current.VersionCA{ + Image: current.CAImages{ + CAInitImage: "ghcr.io/ibm-blockchain/ca-init-image", + CAInitTag: "1.0.0-amd64", + CAImage: "ghcr.io/ibm-blockchain/ca-image", + CATag: "1.0.0-amd64", + }, + }, + }, + Peer: map[string]current.VersionPeer{ + "v1-0": current.VersionPeer{ + Image: current.PeerImages{ + PeerInitImage: "ghcr.io/ibm-blockchain/peer-init-image", + PeerInitTag: "1.0.0-amd64", + PeerImage: "ghcr.io/ibm-blockchain/peer-image", + PeerTag: "1.0.0-amd64", + 
DindImage: "ghcr.io/ibm-blockchain/dind-iamge", + DindTag: "1.0.0-amd64", + GRPCWebImage: "ghcr.io/ibm-blockchain/grpcweb-image", + GRPCWebTag: "1.0.0-amd64", + FluentdImage: "ghcr.io/ibm-blockchain/fluentd-image", + FluentdTag: "1.0.0-amd64", + CouchDBImage: "ghcr.io/ibm-blockchain/couchdb-image", + CouchDBTag: "1.0.0-amd64", + }, + }, + }, + Orderer: map[string]current.VersionOrderer{ + "v1-0": current.VersionOrderer{ + Image: current.OrdererImages{ + OrdererInitImage: "ghcr.io/ibm-blockchain/orderer-init-image", + OrdererInitTag: "1.0.0-amd64", + OrdererImage: "ghcr.io/ibm-blockchain/orderer-image", + OrdererTag: "1.0.0-amd64", + GRPCWebImage: "ghcr.io/ibm-blockchain/grpcweb-image", + GRPCWebTag: "1.0.0-amd64", + }, + }, + }, + } + versions := &deployer.Versions{ + CA: map[string]deployer.VersionCA{ + "1.4": deployer.VersionCA{ + Image: deployer.CAImages{ + CAInitImage: "ca-init-image", + CAInitTag: "1.0.0", + CAInitDigest: "", + CAImage: "ca-image", + CATag: "1.0.0", + CADigest: "", + }, + }, + }, + Peer: map[string]deployer.VersionPeer{ + "1.4": deployer.VersionPeer{ + Image: deployer.PeerImages{ + PeerInitImage: "peer-init-image", + PeerInitTag: "1.0.0", + PeerInitDigest: "", + PeerImage: "peer-image", + PeerTag: "1.0.0", + PeerDigest: "", + DindImage: "dind-iamge", + DindTag: "1.0.0", + DindDigest: "", + GRPCWebImage: "grpcweb-image", + GRPCWebTag: "1.0.0", + GRPCWebDigest: "", + FluentdImage: "fluentd-image", + FluentdTag: "1.0.0", + FluentdDigest: "", + CouchDBImage: "couchdb-image", + CouchDBTag: "1.0.0", + CouchDBDigest: "", + }, + }, + }, + Orderer: map[string]deployer.VersionOrderer{ + "1.4": deployer.VersionOrderer{ + Image: deployer.OrdererImages{ + OrdererInitImage: "orderer-init-image", + OrdererInitTag: "1.0.0", + OrdererInitDigest: "", + OrdererImage: "orderer-image", + OrdererTag: "1.0.0", + OrdererDigest: "", + GRPCWebImage: "grpcweb-image", + GRPCWebTag: "1.0.0", + GRPCWebDigest: "", + }, + }, + }, + } + config := &deployer.Config{ + Versions: versions, + Defaults: &deployer.Defaults{ + Storage: &deployer.Storage{ + Peer: ¤t.PeerStorages{ + Peer: ¤t.StorageSpec{}, + StateDB: ¤t.StorageSpec{}, + }, + CA: ¤t.CAStorages{ + CA: ¤t.StorageSpec{}, + }, + Orderer: ¤t.OrdererStorages{ + Orderer: ¤t.StorageSpec{}, + }, + }, + Resources: &deployer.Resources{}, + }, + } + err := override.CommonDeployerCM(instance, config, nil) + Expect(err).NotTo(HaveOccurred()) + // verify CA images and tags + Expect(config.Versions.CA["1.4"].Image.CAImage).To(Equal(expectedVersions.CA["1.4"].Image.CAImage)) + Expect(config.Versions.CA["1.4"].Image.CATag).To(Equal(expectedVersions.CA["1.4"].Image.CATag)) + Expect(config.Versions.CA["1.4"].Image.CAInitImage).To(Equal(expectedVersions.CA["1.4"].Image.CAInitImage)) + Expect(config.Versions.CA["1.4"].Image.CAInitTag).To(Equal(expectedVersions.CA["1.4"].Image.CAInitTag)) + // verify Peer images and tags + Expect(config.Versions.Peer["1.4"].Image.PeerInitImage).To(Equal(expectedVersions.Peer["1.4"].Image.PeerInitImage)) + Expect(config.Versions.Peer["1.4"].Image.PeerInitTag).To(Equal(expectedVersions.Peer["1.4"].Image.PeerInitTag)) + Expect(config.Versions.Peer["1.4"].Image.PeerImage).To(Equal(expectedVersions.Peer["1.4"].Image.PeerImage)) + Expect(config.Versions.Peer["1.4"].Image.PeerTag).To(Equal(expectedVersions.Peer["1.4"].Image.PeerTag)) + Expect(config.Versions.Peer["1.4"].Image.DindImage).To(Equal(expectedVersions.Peer["1.4"].Image.DindImage)) + 
Expect(config.Versions.Peer["1.4"].Image.DindTag).To(Equal(expectedVersions.Peer["1.4"].Image.DindTag)) + Expect(config.Versions.Peer["1.4"].Image.FluentdImage).To(Equal(expectedVersions.Peer["1.4"].Image.FluentdImage)) + Expect(config.Versions.Peer["1.4"].Image.FluentdTag).To(Equal(expectedVersions.Peer["1.4"].Image.FluentdTag)) + Expect(config.Versions.Peer["1.4"].Image.CouchDBImage).To(Equal(expectedVersions.Peer["1.4"].Image.CouchDBImage)) + Expect(config.Versions.Peer["1.4"].Image.CouchDBTag).To(Equal(expectedVersions.Peer["1.4"].Image.CouchDBTag)) + Expect(config.Versions.Peer["1.4"].Image.GRPCWebImage).To(Equal(expectedVersions.Peer["1.4"].Image.GRPCWebImage)) + Expect(config.Versions.Peer["1.4"].Image.GRPCWebTag).To(Equal(expectedVersions.Peer["1.4"].Image.GRPCWebTag)) + // verify Orderer images and tags + Expect(config.Versions.Orderer["1.4"].Image.OrdererImage).To(Equal(expectedVersions.Orderer["1.4"].Image.OrdererImage)) + Expect(config.Versions.Orderer["1.4"].Image.OrdererTag).To(Equal(expectedVersions.Orderer["1.4"].Image.OrdererTag)) + Expect(config.Versions.Orderer["1.4"].Image.OrdererInitImage).To(Equal(expectedVersions.Orderer["1.4"].Image.OrdererInitImage)) + Expect(config.Versions.Orderer["1.4"].Image.OrdererInitTag).To(Equal(expectedVersions.Orderer["1.4"].Image.OrdererInitTag)) + Expect(config.Versions.Orderer["1.4"].Image.GRPCWebImage).To(Equal(expectedVersions.Orderer["1.4"].Image.GRPCWebImage)) + Expect(config.Versions.Orderer["1.4"].Image.GRPCWebTag).To(Equal(expectedVersions.Orderer["1.4"].Image.GRPCWebTag)) + }) + }) + }) + }) +}) diff --git a/pkg/offering/base/console/override/deployerservice.go b/pkg/offering/base/console/override/deployerservice.go new file mode 100644 index 00000000..2115f693 --- /dev/null +++ b/pkg/offering/base/console/override/deployerservice.go @@ -0,0 +1,63 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package override + +import ( + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (o *Override) DeployerService(object v1.Object, service *corev1.Service, action resources.Action) error { + instance := object.(*current.IBPConsole) + + switch action { + case resources.Create: + return o.CreateDeployerService(instance, service) + case resources.Update: + return o.UpdateDeployerService(instance, service) + } + + return nil +} + +func (o *Override) CreateDeployerService(instance *current.IBPConsole, service *corev1.Service) error { + err := o.CommonDeployerServiceOverride(instance, service) + if err != nil { + return err + } + + return nil +} + +func (o *Override) UpdateDeployerService(instance *current.IBPConsole, service *corev1.Service) error { + return nil +} + +func (o *Override) CommonDeployerServiceOverride(instance *current.IBPConsole, service *corev1.Service) error { + if instance.Spec.Service != nil { + serviceType := instance.Spec.Service.Type + if serviceType != "" { + service.Spec.Type = serviceType + } + } + + return nil +} diff --git a/pkg/offering/base/console/override/deployerservice_test.go b/pkg/offering/base/console/override/deployerservice_test.go new file mode 100644 index 00000000..e516e707 --- /dev/null +++ b/pkg/offering/base/console/override/deployerservice_test.go @@ -0,0 +1,74 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + consolev1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/console/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/console/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" +) + +var _ = Describe("Base Console Deployer Service Overrides", func() { + var ( + overrider *override.Override + instance *current.IBPConsole + service *corev1.Service + ) + + BeforeEach(func() { + var err error + + service, err = util.GetServiceFromFile("../../../../../definitions/console/deployer-service.yaml") + Expect(err).NotTo(HaveOccurred()) + + overrider = &override.Override{} + instance = ¤t.IBPConsole{ + Spec: current.IBPConsoleSpec{ + Service: ¤t.Service{ + Type: corev1.ServiceTypeNodePort, + }, + NetworkInfo: ¤t.NetworkInfo{ + ConsolePort: 1234, + }, + }, + } + }) + + Context("create", func() { + It("overrides values based on spec with devmode on", func() { + instance.Spec.FeatureFlags = &consolev1.FeatureFlags{ + DevMode: true, + } + + err := overrider.DeployerService(instance, service, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + By("setting service type", func() { + Expect(service.Spec.Type).To(Equal(instance.Spec.Service.Type)) + }) + }) + + }) +}) diff --git a/pkg/offering/base/console/override/deployment.go b/pkg/offering/base/console/override/deployment.go new file mode 100644 index 00000000..1156de8d --- /dev/null +++ b/pkg/offering/base/console/override/deployment.go @@ -0,0 +1,458 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package override + +import ( + "encoding/json" + "net/url" + + "github.com/pkg/errors" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + defaultconsole "github.com/IBM-Blockchain/fabric-operator/defaultconfig/console" + deployerimgs "github.com/IBM-Blockchain/fabric-operator/pkg/apis/deployer" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/container" + dep "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/deployment" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/serviceaccount" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/util/image" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// Container names +const ( + INIT = "init" + CONSOLE = "optools" + DEPLOYER = "deployer" + CONFIGTXLATOR = "configtxlator" + COUCHDB = "couchdb" +) + +func (o *Override) Deployment(object v1.Object, deployment *appsv1.Deployment, action resources.Action) error { + instance := object.(*current.IBPConsole) + switch action { + case resources.Create: + return o.CreateDeployment(instance, deployment) + case resources.Update: + return o.UpdateDeployment(instance, deployment) + } + + return nil +} + +func (o *Override) CreateDeployment(instance *current.IBPConsole, k8sDep *appsv1.Deployment) error { + deployment := dep.New(k8sDep) + + name := instance.GetName() + deployment.SetServiceAccountName(serviceaccount.GetName(name)) + + // Make sure containers exist + console, err := deployment.GetContainer(CONSOLE) + if err != nil { + return errors.New("console container not found in deployment spec") + } + _, err = deployment.GetContainer(INIT) + if err != nil { + return errors.New("init container not found in deployment spec") + } + _, err = deployment.GetContainer(DEPLOYER) + if err != nil { + return errors.New("deployer container not found in deployment spec") + } + _, err = deployment.GetContainer(CONFIGTXLATOR) + if err != nil { + return errors.New("configtxlator container not found in deployment spec") + } + + if !instance.Spec.UsingRemoteDB() { + couchdb := o.CreateCouchdbContainer() + + couchdb.AppendVolumeMountWithSubPathIfMissing("couchdb", "/opt/couchdb/data", "data") + deployment.AddContainer(couchdb) + } + + err = o.CommonDeployment(instance, deployment) + if err != nil { + return err + } + + deployment.SetImagePullSecrets(instance.Spec.ImagePullSecrets) + + console.AppendConfigMapFromSourceIfMissing(name) + + passwordSecretName := instance.Spec.PasswordSecretName + valueFrom := &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: passwordSecretName, + }, + Key: "password", + }, + } + console.AppendEnvVarValueFromIfMissing("DEFAULT_USER_PASSWORD_INITIAL", valueFrom) + + tlsSecretName := instance.Spec.TLSSecretName + if tlsSecretName != "" { + console.AppendVolumeMountIfMissing("tls-certs", "/certs/tls") + deployment.AppendSecretVolumeIfMissing("tls-certs", tlsSecretName) + } else { + // TODO: generate and create the TLS Secret here itself + } + + if !instance.Spec.UsingRemoteDB() { + deployment.AppendPVCVolumeIfMissing("couchdb", instance.Name+"-pvc") + } + + deployment.AppendConfigMapVolumeIfMissing("deployer-template", name+"-deployer") + 
deployment.AppendConfigMapVolumeIfMissing("template", name+"-console") + deployment.SetAffinity(o.GetAffinity(instance)) + + return nil +} + +func (o *Override) UpdateDeployment(instance *current.IBPConsole, k8sDep *appsv1.Deployment) error { + deployment := dep.New(k8sDep) + return o.CommonDeployment(instance, deployment) +} + +func (o *Override) CommonDeployment(instance *current.IBPConsole, deployment *dep.Deployment) error { + init := deployment.MustGetContainer(INIT) + console := deployment.MustGetContainer(CONSOLE) + deployer := deployment.MustGetContainer(DEPLOYER) + configtxlator := deployment.MustGetContainer(CONFIGTXLATOR) + + registryURL := instance.Spec.RegistryURL + arch := "amd64" + if instance.Spec.Arch != nil { + arch = instance.Spec.Arch[0] + } + + images := &deployerimgs.ConsoleImages{} + if instance.Spec.Images != nil { + // convert spec images to deployer config images + instanceImgBytes, err := json.Marshal(instance.Spec.Images) + if err != nil { + return err + } + err = json.Unmarshal(instanceImgBytes, images) + if err != nil { + return err + } + } + + var consoleImage, consoleTag, initImage, initTag, deployerImage, deployerTag string + var configtxlatorImage, configtxlatorTag, couchdbImage, couchdbTag string + + defaultimage := defaultconsole.GetImages() + consoleImage = image.GetImage(registryURL, defaultimage.ConsoleImage, images.ConsoleImage) + initImage = image.GetImage(registryURL, defaultimage.ConsoleInitImage, images.ConsoleInitImage) + deployerImage = image.GetImage(registryURL, defaultimage.DeployerImage, images.DeployerImage) + configtxlatorImage = image.GetImage(registryURL, defaultimage.ConfigtxlatorImage, images.ConfigtxlatorImage) + + if instance.UseTags() { + consoleTag = image.GetTag(arch, defaultimage.ConsoleTag, images.ConsoleTag) + initTag = image.GetTag(arch, defaultimage.ConsoleInitTag, images.ConsoleInitTag) + deployerTag = image.GetTag(arch, defaultimage.DeployerTag, images.DeployerTag) + configtxlatorTag = image.GetTag(arch, defaultimage.ConfigtxlatorTag, images.ConfigtxlatorTag) + } else { + consoleTag = image.GetTag(arch, defaultimage.ConsoleDigest, images.ConsoleDigest) + initTag = image.GetTag(arch, defaultimage.ConsoleInitDigest, images.ConsoleInitDigest) + deployerTag = image.GetTag(arch, defaultimage.DeployerDigest, images.DeployerDigest) + configtxlatorTag = image.GetTag(arch, defaultimage.ConfigtxlatorDigest, images.ConfigtxlatorDigest) + } + init.SetImage(initImage, initTag) + console.SetImage(consoleImage, consoleTag) + deployer.SetImage(deployerImage, deployerTag) + configtxlator.SetImage(configtxlatorImage, configtxlatorTag) + + resourcesRequest := instance.Spec.Resources + if !instance.Spec.UsingRemoteDB() { + couchdb := deployment.MustGetContainer(COUCHDB) + + if instance.Spec.ConnectionString != "" { + connectionURL, err := url.Parse(instance.Spec.ConnectionString) + if err != nil { + return err + } + if connectionURL.Host == "localhost:5984" { + if connectionURL.Scheme == "http" { + couchdbUser := connectionURL.User.Username() + couchdbPassword, set := connectionURL.User.Password() + if set { + couchdb.AppendEnvIfMissing("COUCHDB_USER", couchdbUser) + couchdb.AppendEnvIfMissing("COUCHDB_PASSWORD", couchdbPassword) + } + } + } + } + + couchdbImage = image.GetImage(registryURL, defaultimage.CouchDBImage, images.CouchDBImage) + if instance.Spec.UseTags != nil && *(instance.Spec.UseTags) { + couchdbTag = image.GetTag(arch, defaultimage.CouchDBTag, images.CouchDBTag) + } else { + couchdbTag = image.GetTag(arch, 
defaultimage.CouchDBDigest, images.CouchDBDigest) + + } + couchdb.SetImage(couchdbImage, couchdbTag) + + if resourcesRequest != nil { + if resourcesRequest.CouchDB != nil { + err := couchdb.UpdateResources(resourcesRequest.CouchDB) + if err != nil { + return errors.Wrap(err, "update resources for couchdb failed") + } + } + } + } + + if resourcesRequest != nil { + if resourcesRequest.Console != nil { + err := console.UpdateResources(resourcesRequest.Console) + if err != nil { + return errors.Wrap(err, "update resources for console failed") + } + } + + if resourcesRequest.Deployer != nil { + err := deployer.UpdateResources(resourcesRequest.Deployer) + if err != nil { + return errors.Wrap(err, "update resources for deployer failed") + } + } + + if resourcesRequest.Configtxlator != nil { + err := configtxlator.UpdateResources(resourcesRequest.Configtxlator) + if err != nil { + return errors.Wrap(err, "update resources for configtxlator failed") + } + } + } + + if err := setReplicas(instance, deployment); err != nil { + return err + } + setDeploymentStrategy(instance, deployment) + setSpreadConstraints(instance, deployment) + + kubeconfigSecretName := instance.Spec.KubeconfigSecretName + if kubeconfigSecretName != "" { + deployer.AppendVolumeMountIfMissing("kubeconfig", "/kubeconfig/") + deployment.AppendSecretVolumeIfMissing("kubeconfig", kubeconfigSecretName) + deployer.AppendEnvIfMissing("KUBECONFIGPATH", "/kubeconfig/kubeconfig.yaml") + } + + kubeconfigNamespace := instance.Spec.KubeconfigNamespace + if kubeconfigNamespace != "" { + deployer.AppendEnvIfMissing("DEPLOY_NAMESPACE", kubeconfigNamespace) + } else { + valueFrom := &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.namespace", + }, + } + deployer.AppendEnvVarValueFromIfMissing("DEPLOY_NAMESPACE", valueFrom) + } + + consoleOverrides, err := instance.Spec.GetOverridesConsole() + if err != nil { + return err + } + + initCommand := "" + if !instance.Spec.UsingRemoteDB() { + initCommand = "chmod -R 775 /opt/couchdb/data/ && chown -R -H 5984:5984 /opt/couchdb/data/ && chmod -R 775 /certs/ && chown -R -H 1000:1000 /certs/" + + couchDBVolumeMount := corev1.VolumeMount{ + Name: "couchdb", + MountPath: "/opt/couchdb/data", + SubPath: "data", + } + + certsVolumeMount := corev1.VolumeMount{ + Name: "couchdb", + MountPath: "/certs/", + SubPath: "tls", + } + init.SetVolumeMounts([]corev1.VolumeMount{couchDBVolumeMount, certsVolumeMount}) + console.AppendVolumeMountWithSubPathIfMissing("couchdb", "/certs/", "tls") + } + + if consoleOverrides.ActivityTrackerConsolePath != "" { + hostPath := "/var/log/at" + if consoleOverrides.ActivityTrackerHostPath != "" { + hostPath = consoleOverrides.ActivityTrackerHostPath + } + deployment.AppendHostPathVolumeIfMissing("activity", hostPath, corev1.HostPathDirectoryOrCreate) + + console.AppendVolumeMountWithSubPathIfMissing("activity", consoleOverrides.ActivityTrackerConsolePath, instance.Namespace) + init.AppendVolumeMountWithSubPathIfMissing("activity", consoleOverrides.ActivityTrackerConsolePath, instance.Namespace) + + if initCommand != "" { + initCommand += " && " + } + initCommand += "chmod -R 775 " + consoleOverrides.ActivityTrackerConsolePath + " && chown -R -H 1000:1000 " + consoleOverrides.ActivityTrackerConsolePath + } + + if initCommand == "" { + initCommand = "exit 0" + } + init.SetCommand([]string{"sh", "-c", initCommand}) + + return nil +} + +func (o *Override) GetAffinity(instance *current.IBPConsole) *corev1.Affinity { + arch := instance.Spec.Arch + zone := 
instance.Spec.Zone + region := instance.Spec.Region + nodeSelectorTerms := common.GetNodeSelectorTerms(arch, zone, region) + + affinity := &corev1.Affinity{} + + if len(nodeSelectorTerms[0].MatchExpressions) != 0 { + affinity.NodeAffinity = &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: nodeSelectorTerms, + }, + } + } + + return affinity +} + +func (o *Override) CreateCouchdbContainer() container.Container { + falsep := false + truep := true + portp := int64(5984) + + couchdb := &corev1.Container{ + Name: "couchdb", + Image: "", + ImagePullPolicy: "Always", + Env: []corev1.EnvVar{ + corev1.EnvVar{ + Name: "LICENSE", + Value: "accept", + }, + }, + SecurityContext: &corev1.SecurityContext{ + Privileged: &falsep, + AllowPrivilegeEscalation: &falsep, + ReadOnlyRootFilesystem: &falsep, + RunAsNonRoot: &truep, + RunAsUser: &portp, + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + Add: []corev1.Capability{"NET_BIND_SERVICE", "CHOWN", "DAC_OVERRIDE", "SETGID", "SETUID"}, + }, + }, + Ports: []corev1.ContainerPort{ + corev1.ContainerPort{ + Name: "http", + ContainerPort: 5984, + }, + }, + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{ + Port: intstr.FromInt(5984), + }, + }, + InitialDelaySeconds: 16, + TimeoutSeconds: 5, + FailureThreshold: 5, + }, + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{ + Port: intstr.FromInt(5984), + }, + }, + InitialDelaySeconds: 10, + TimeoutSeconds: 5, + FailureThreshold: 5, + }, + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("500m"), + corev1.ResourceMemory: resource.MustParse("1000Mi"), + corev1.ResourceEphemeralStorage: resource.MustParse("1Gi"), + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("500m"), + corev1.ResourceMemory: resource.MustParse("1000Mi"), + corev1.ResourceEphemeralStorage: resource.MustParse("100Mi"), + }, + }, + } + + return *container.New(couchdb) +} + +func setReplicas(instance *current.IBPConsole, d *dep.Deployment) error { + if instance.Spec.Replicas != nil { + if !instance.Spec.UsingRemoteDB() && *instance.Spec.Replicas > 1 { + return errors.New("replicas > 1 not allowed in IBPConsole") + } + + d.SetReplicas(instance.Spec.Replicas) + } + + return nil +} + +func setDeploymentStrategy(instance *current.IBPConsole, d *dep.Deployment) { + switch instance.Spec.UsingRemoteDB() { + case false: + d.Spec.Strategy = appsv1.DeploymentStrategy{ + Type: appsv1.RecreateDeploymentStrategyType, + } + case true: + opts := intstr.FromString("25%") + d.Spec.Strategy = appsv1.DeploymentStrategy{ + Type: appsv1.RollingUpdateDeploymentStrategyType, + RollingUpdate: &appsv1.RollingUpdateDeployment{ + MaxUnavailable: &opts, + MaxSurge: &opts, + }, + } + } +} + +func setSpreadConstraints(instance *current.IBPConsole, d *dep.Deployment) { + if instance.Spec.UsingRemoteDB() { + d.Spec.Template.Spec.TopologySpreadConstraints = []corev1.TopologySpreadConstraint{ + { + MaxSkew: 1, + TopologyKey: "topology.kubernetes.io/zone", + WhenUnsatisfiable: corev1.ScheduleAnyway, + LabelSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{ + "type": "ibpconsole", + }, + }, + }, + } + } +} diff --git a/pkg/offering/base/console/override/deployment_test.go b/pkg/offering/base/console/override/deployment_test.go new file mode 100644 index 00000000..509106f7 --- /dev/null +++ 
b/pkg/offering/base/console/override/deployment_test.go @@ -0,0 +1,492 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + "encoding/json" + "fmt" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/console/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +var _ = Describe("Base Console Deployment Overrides", func() { + Context("Deployment", func() { + var ( + overrider *override.Override + instance, instanceWithTags *current.IBPConsole + deployment *appsv1.Deployment + err error + usetagsFlag bool + ) + + BeforeEach(func() { + overrider = &override.Override{} + + instance = ¤t.IBPConsole{ + Spec: current.IBPConsoleSpec{ + License: current.License{ + Accept: true, + }, + ServiceAccountName: "test", + AuthScheme: "couchdb", + DeployerTimeout: 30000, + Components: "athena-components", + Sessions: "athena-sessions", + System: "athena-system", + ConnectionString: "test.com", + Service: ¤t.Service{}, + Email: "xyz@ibm.com", + PasswordSecretName: "secret", + Password: "cGFzc3dvcmQ=", + KubeconfigSecretName: "kubeconfig-secret", + SystemChannel: "testchainid", + ImagePullSecrets: []string{"testsecret"}, + Images: ¤t.ConsoleImages{ + ConsoleInitImage: "fake-init-image", + ConsoleInitTag: "1234", + CouchDBImage: "fake-couchdb-image", + CouchDBTag: "1234", + ConsoleImage: "fake-console-image", + ConsoleTag: "1234", + ConfigtxlatorImage: "fake-configtxlator-image", + ConfigtxlatorTag: "1234", + DeployerImage: "fake-deployer-image", + DeployerTag: "1234", + }, + RegistryURL: "ghcr.io/ibm-blockchain/", + NetworkInfo: ¤t.NetworkInfo{ + Domain: "test.domain", + ConsolePort: 31010, + ProxyPort: 31011, + }, + TLSSecretName: "secret", + Resources: ¤t.ConsoleResources{}, + Storage: ¤t.ConsoleStorage{ + Console: ¤t.StorageSpec{ + Size: "100m", + Class: "manual", + }, + }, + }, + } + deployment, err = util.GetDeploymentFromFile("../../../../../definitions/console/deployment.yaml") + Expect(err).NotTo(HaveOccurred()) + usetagsFlag = true + instanceWithTags = ¤t.IBPConsole{ + Spec: current.IBPConsoleSpec{ + License: current.License{ + Accept: true, + }, + ServiceAccountName: "test", + AuthScheme: "couchdb", + DeployerTimeout: 30000, + Components: "athena-components", + Sessions: "athena-sessions", + System: "athena-system", + ConnectionString: "test.com", + Service: ¤t.Service{}, + Email: "xyz@ibm.com", + PasswordSecretName: "secret", + Password: "cGFzc3dvcmQ=", + KubeconfigSecretName: "kubeconfig-secret", + SystemChannel: "testchainid", + ImagePullSecrets: []string{"testsecret"}, + Images: ¤t.ConsoleImages{ + ConsoleInitImage: 
"fake-init-image", + ConsoleInitTag: "1234", + CouchDBImage: "fake-couchdb-image", + CouchDBTag: "1234", + ConsoleImage: "fake-console-image", + ConsoleTag: "1234", + ConfigtxlatorImage: "fake-configtxlator-image", + ConfigtxlatorTag: "1234", + DeployerImage: "fake-deployer-image", + DeployerTag: "1234", + MustgatherImage: "fake-mustgather-image", + MustgatherTag: "1234", + }, + RegistryURL: "ghcr.io/ibm-blockchain/", + NetworkInfo: ¤t.NetworkInfo{ + Domain: "test.domain", + ConsolePort: 31010, + ProxyPort: 31011, + }, + TLSSecretName: "secret", + Resources: ¤t.ConsoleResources{}, + Storage: ¤t.ConsoleStorage{ + Console: ¤t.StorageSpec{ + Size: "100m", + Class: "manual", + }, + }, + UseTags: &usetagsFlag, + }, + } + }) + + Context("create", func() { + It("overrides values based on spec", func() { + err := overrider.Deployment(instanceWithTags, deployment, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + By("setting service account name", func() { + Expect(deployment.Spec.Template.Spec.ServiceAccountName).To(Equal(instanceWithTags.Name)) + }) + + By("image pull secret", func() { + Expect(deployment.Spec.Template.Spec.ImagePullSecrets).To(Equal([]corev1.LocalObjectReference{ + corev1.LocalObjectReference{ + Name: instanceWithTags.Spec.ImagePullSecrets[0], + }, + })) + }) + + By("setting DEFAULT_USER_PASSWORD_INITIAL env var", func() { + envVar := corev1.EnvVar{ + Name: "DEFAULT_USER_PASSWORD_INITIAL", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: instanceWithTags.Spec.PasswordSecretName, + }, + Key: "password", + }, + }, + } + Expect(deployment.Spec.Template.Spec.Containers[0].Env).To(ContainElement(envVar)) + }) + + By("setting TLS volume and volume mount if TLS secret name provided in spec", func() { + vm := corev1.VolumeMount{ + Name: "tls-certs", + MountPath: "/certs/tls", + } + Expect(deployment.Spec.Template.Spec.Containers[0].VolumeMounts).To(ContainElement(vm)) + + v := corev1.Volume{ + Name: "tls-certs", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: instanceWithTags.Spec.TLSSecretName, + }, + }, + } + Expect(deployment.Spec.Template.Spec.Volumes).To(ContainElement(v)) + }) + + By("setting deployer volume", func() { + v := corev1.Volume{ + Name: "deployer-template", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: instanceWithTags.Name + "-deployer", + }, + }, + }, + } + Expect(deployment.Spec.Template.Spec.Volumes).To(ContainElement(v)) + }) + + By("setting console volume", func() { + v := corev1.Volume{ + Name: "template", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: instanceWithTags.Name + "-console", + }, + }, + }, + } + Expect(deployment.Spec.Template.Spec.Volumes).To(ContainElement(v)) + }) + + By("setting affinity", func() { + expectedAffinity := overrider.GetAffinity(instanceWithTags) + Expect(deployment.Spec.Template.Spec.Affinity).To(Equal(expectedAffinity)) + }) + + ConsoleDeploymentCommonOverrides(instanceWithTags, deployment) + }) + + Context("using couchdb", func() { + BeforeEach(func() { + instance.Spec.ConnectionString = "localhost" + }) + + It("overrides values based on spec", func() { + err := overrider.Deployment(instance, deployment, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + By("setting couchdb TLS volume and volume 
mount", func() { + vm := corev1.VolumeMount{ + Name: "couchdb", + MountPath: "/opt/couchdb/data", + SubPath: "data", + } + Expect(deployment.Spec.Template.Spec.Containers[3].VolumeMounts).To(ContainElement(vm)) + Expect(deployment.Spec.Template.Spec.InitContainers[0].VolumeMounts).To(ContainElement(vm)) + }) + + By("setting cert volume and volume mount", func() { + vm := corev1.VolumeMount{ + Name: "couchdb", + MountPath: "/certs/", + SubPath: "tls", + } + Expect(deployment.Spec.Template.Spec.Containers[0].VolumeMounts).To(ContainElement(vm)) + Expect(deployment.Spec.Template.Spec.InitContainers[0].VolumeMounts).To(ContainElement(vm)) + }) + }) + }) + + Context("not using TLS secret name", func() { + BeforeEach(func() { + instance.Spec.TLSSecretName = "" + }) + + It("overrides values based on spec", func() { + vm := corev1.VolumeMount{ + Name: "tls-certs", + MountPath: "/certs/tls", + } + Expect(deployment.Spec.Template.Spec.Containers[0].VolumeMounts).NotTo(ContainElement(vm)) + + v := corev1.Volume{ + Name: "tls-certs", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: instance.Spec.TLSSecretName, + }, + }, + } + Expect(deployment.Spec.Template.Spec.Volumes).NotTo(ContainElement(v)) + }) + }) + }) + + Context("enabling activity tracker", func() { + It("overrides mounts based on spec overrides", func() { + consoleOverride := ¤t.ConsoleOverridesConsole{ + ActivityTrackerConsolePath: "fake/path", + ActivityTrackerHostPath: "host/path", + } + consoleBytes, err := json.Marshal(consoleOverride) + Expect(err).NotTo(HaveOccurred()) + instance.Spec.ConfigOverride = ¤t.ConsoleOverrides{ + Console: &runtime.RawExtension{Raw: consoleBytes}, + } + + err = overrider.Deployment(instance, deployment, resources.Create) + Expect(err).NotTo(HaveOccurred()) + vm := corev1.VolumeMount{ + Name: "activity", + MountPath: "fake/path", + SubPath: "", + } + Expect(deployment.Spec.Template.Spec.Containers[0].VolumeMounts).To(ContainElement(vm)) + hostPathType := corev1.HostPathDirectoryOrCreate + v := corev1.Volume{ + Name: "activity", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "host/path", + Type: &hostPathType, + }, + }, + } + Expect(deployment.Spec.Template.Spec.Volumes).To(ContainElement(v)) + + By("adding to init container command", func() { + Expect(deployment.Spec.Template.Spec.InitContainers[0].Command).To(Equal([]string{ + "sh", + "-c", + "chmod -R 775 fake/path && chown -R -H 1000:1000 fake/path", + })) + }) + }) + + It("overrides mounts based on spec overrides when only console path provided", func() { + consoleOverride := ¤t.ConsoleOverridesConsole{ + ActivityTrackerConsolePath: "fake/path", + } + consoleBytes, err := json.Marshal(consoleOverride) + Expect(err).NotTo(HaveOccurred()) + instance.Spec.ConfigOverride = ¤t.ConsoleOverrides{ + Console: &runtime.RawExtension{Raw: consoleBytes}, + } + + err = overrider.Deployment(instance, deployment, resources.Create) + Expect(err).NotTo(HaveOccurred()) + vm := corev1.VolumeMount{ + Name: "activity", + MountPath: "fake/path", + SubPath: "", + } + Expect(deployment.Spec.Template.Spec.Containers[0].VolumeMounts).To(ContainElement(vm)) + hostPathType := corev1.HostPathDirectoryOrCreate + v := corev1.Volume{ + Name: "activity", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/var/log/at", + Type: &hostPathType, + }, + }, + } + Expect(deployment.Spec.Template.Spec.Volumes).To(ContainElement(v)) + }) + + It("adds command to init container 
command correctly when not using remote DB", func() { + consoleOverride := ¤t.ConsoleOverridesConsole{ + ActivityTrackerConsolePath: "fake/path", + ActivityTrackerHostPath: "host/path", + } + consoleBytes, err := json.Marshal(consoleOverride) + Expect(err).NotTo(HaveOccurred()) + instance.Spec.ConfigOverride = ¤t.ConsoleOverrides{ + Console: &runtime.RawExtension{Raw: consoleBytes}, + } + instance.Spec.ConnectionString = "localhost" + + err = overrider.Deployment(instance, deployment, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + By("appending to init container command", func() { + Expect(deployment.Spec.Template.Spec.InitContainers[0].Command).To(Equal([]string{ + "sh", + "-c", + "chmod -R 775 /opt/couchdb/data/ && chown -R -H 5984:5984 /opt/couchdb/data/ && chmod -R 775 /certs/ && chown -R -H 1000:1000 /certs/ && chmod -R 775 fake/path && chown -R -H 1000:1000 fake/path", + })) + }) + }) + }) + + // TODO:OSS + // as both the console and deployer defaults are blank + // Context("update", func() { + // It("doesn't overrides images and tags values, when usetags flag is not set", func() { + // err := overrider.Deployment(instance, deployment, resources.Update) + // Expect(err).NotTo(HaveOccurred()) + // Expect(deployment.Spec.Template.Spec.Containers[0].Image).To(ContainSubstring("ghcr.io/ibm-blockchain/fake-console-image@sha256")) + // Expect(deployment.Spec.Template.Spec.InitContainers[0].Image).To(ContainSubstring("ghcr.io/ibm-blockchain/fake-init-image@sha256")) + // Expect(deployment.Spec.Template.Spec.Containers[1].Image).To(ContainSubstring("ghcr.io/ibm-blockchain/fake-deployer-image@sha256")) + // Expect(deployment.Spec.Template.Spec.Containers[2].Image).To(ContainSubstring("ghcr.io/ibm-blockchain/fake-configtxlator-image@sha256")) + // }) + // }) + + Context("update when usetags set", func() { + It("overrides values based on spec, when usetags flag is set", func() { + err := overrider.Deployment(instanceWithTags, deployment, resources.Update) + Expect(err).NotTo(HaveOccurred()) + ConsoleDeploymentCommonOverrides(instanceWithTags, deployment) + }) + }) + + Context("Replicas", func() { + When("using remote db", func() { + It("using replica value from spec", func() { + replicas := int32(2) + instance.Spec.Replicas = &replicas + err := overrider.Deployment(instance, deployment, resources.Create) + Expect(err).NotTo(HaveOccurred()) + Expect(*deployment.Spec.Replicas).To(Equal(replicas)) + }) + }) + + When("Replicas is greater than 1", func() { + BeforeEach(func() { + instance.Spec.ConnectionString = "localhost" + }) + + It("returns an error", func() { + replicas := int32(2) + instance.Spec.Replicas = &replicas + err := overrider.Deployment(instance, deployment, resources.Create) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("replicas > 1 not allowed in IBPConsole")) + }) + }) + + When("Replicas is equal to 1", func() { + It("returns success", func() { + replicas := int32(1) + instance.Spec.Replicas = &replicas + err := overrider.Deployment(instance, deployment, resources.Create) + Expect(err).NotTo(HaveOccurred()) + }) + }) + When("Replicas is equal to 0", func() { + It("returns success", func() { + replicas := int32(0) + instance.Spec.Replicas = &replicas + err := overrider.Deployment(instance, deployment, resources.Create) + Expect(err).NotTo(HaveOccurred()) + }) + }) + When("Replicas is nil", func() { + It("returns success", func() { + instance.Spec.Replicas = nil + err := overrider.Deployment(instance, deployment, resources.Create) + 
Expect(err).NotTo(HaveOccurred()) + }) + }) + }) + }) +}) + +func ConsoleDeploymentCommonOverrides(instance *current.IBPConsole, dep *appsv1.Deployment) { + By("setting init image", func() { + Expect(dep.Spec.Template.Spec.InitContainers[0].Image).To(Equal(fmt.Sprintf("%s%s:%s", instance.Spec.RegistryURL, instance.Spec.Images.ConsoleInitImage, instance.Spec.Images.ConsoleInitTag))) + }) + + By("setting console image", func() { + Expect(dep.Spec.Template.Spec.Containers[0].Image).To(Equal(fmt.Sprintf("%s%s:%s", instance.Spec.RegistryURL, instance.Spec.Images.ConsoleImage, instance.Spec.Images.ConsoleTag))) + }) + + By("setting deployer image", func() { + Expect(dep.Spec.Template.Spec.Containers[1].Image).To(Equal(fmt.Sprintf("%s%s:%s", instance.Spec.RegistryURL, instance.Spec.Images.DeployerImage, instance.Spec.Images.DeployerTag))) + }) + + By("setting configtxlator image", func() { + Expect(dep.Spec.Template.Spec.Containers[2].Image).To(Equal(fmt.Sprintf("%s%s:%s", instance.Spec.RegistryURL, instance.Spec.Images.ConfigtxlatorImage, instance.Spec.Images.ConfigtxlatorTag))) + }) + + By("setting replicas", func() { + Expect(dep.Spec.Replicas).To(Equal(instance.Spec.Replicas)) + }) + + By("setting KUBECONFIGPATH env var", func() { + envVar := corev1.EnvVar{ + Name: "KUBECONFIGPATH", + Value: "/kubeconfig/kubeconfig.yaml", + } + Expect(dep.Spec.Template.Spec.Containers[1].Env).To(ContainElement(envVar)) + }) +} diff --git a/pkg/offering/base/console/override/envcm.go b/pkg/offering/base/console/override/envcm.go new file mode 100644 index 00000000..85b7b212 --- /dev/null +++ b/pkg/offering/base/console/override/envcm.go @@ -0,0 +1,80 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package override + +import ( + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (o *Override) CM(object v1.Object, cm *corev1.ConfigMap, action resources.Action, options map[string]interface{}) error { + instance := object.(*current.IBPConsole) + switch action { + case resources.Create: + return o.CreateCM(instance, cm) + case resources.Update: + return o.UpdateCM(instance, cm) + } + + return nil +} + +func (o *Override) CreateCM(instance *current.IBPConsole, cm *corev1.ConfigMap) error { + cm.Data["HOST_URL"] = "https://" + instance.Spec.NetworkInfo.Domain + ":443" + + err := o.CommonCM(instance, cm) + if err != nil { + return err + } + + return nil +} + +func (o *Override) UpdateCM(instance *current.IBPConsole, cm *corev1.ConfigMap) error { + err := o.CommonCM(instance, cm) + if err != nil { + return err + } + + return nil +} + +func (o *Override) CommonCM(instance *current.IBPConsole, cm *corev1.ConfigMap) error { + if instance.Spec.ConnectionString != "" { + connectionString := instance.Spec.ConnectionString + cm.Data["DB_CONNECTION_STRING"] = connectionString + } + + if instance.Spec.System != "" { + system := instance.Spec.System + cm.Data["DB_SYSTEM"] = system + } + + if instance.Spec.TLSSecretName != "" { + cm.Data["KEY_FILE_PATH"] = "/certs/tls/tls.key" + cm.Data["PEM_FILE_PATH"] = "/certs/tls/tls.crt" + } else { + cm.Data["KEY_FILE_PATH"] = "/certs/tls.key" + cm.Data["PEM_FILE_PATH"] = "/certs/tls.crt" + } + + return nil +} diff --git a/pkg/offering/base/console/override/envcm_test.go b/pkg/offering/base/console/override/envcm_test.go new file mode 100644 index 00000000..1465a02d --- /dev/null +++ b/pkg/offering/base/console/override/envcm_test.go @@ -0,0 +1,86 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + "fmt" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/console/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" +) + +var _ = Describe("Base Console Env Config Map Overrides", func() { + var ( + overrider *override.Override + instance *current.IBPConsole + cm *corev1.ConfigMap + ) + + BeforeEach(func() { + var err error + overrider = &override.Override{} + instance = ¤t.IBPConsole{ + Spec: current.IBPConsoleSpec{ + ConnectionString: "connection_string", + TLSSecretName: "tls_secret_name", + System: "system1", + NetworkInfo: ¤t.NetworkInfo{ + Domain: "test.domain", + ConsolePort: 31010, + ProxyPort: 31011, + }, + }, + } + cm, err = util.GetConfigMapFromFile("../../../../../definitions/console/configmap.yaml") + Expect(err).NotTo(HaveOccurred()) + }) + + Context("create", func() { + It("appropriately overrides the respective values for env config map", func() { + err := overrider.CM(instance, cm, resources.Create, nil) + Expect(err).NotTo(HaveOccurred()) + + By("setting HOST_URL", func() { + Expect(cm.Data["HOST_URL"]).To(Equal(fmt.Sprintf("https://%s:443", instance.Spec.NetworkInfo.Domain))) + }) + + By("setting DB_CONNECTION_STRING", func() { + Expect(cm.Data["DB_CONNECTION_STRING"]).To(Equal(instance.Spec.ConnectionString)) + }) + + By("setting DB_SYSTEM", func() { + Expect(cm.Data["DB_SYSTEM"]).To(Equal(instance.Spec.System)) + }) + + By("setting KEY_FILE_PATH", func() { + Expect(cm.Data["KEY_FILE_PATH"]).To(Equal("/certs/tls/tls.key")) + }) + + By("setting PEM_FILE_PATH", func() { + Expect(cm.Data["PEM_FILE_PATH"]).To(Equal("/certs/tls/tls.crt")) + }) + }) + }) +}) diff --git a/pkg/offering/base/console/override/override.go b/pkg/offering/base/console/override/override.go new file mode 100644 index 00000000..4cc97779 --- /dev/null +++ b/pkg/offering/base/console/override/override.go @@ -0,0 +1,26 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override + +const ( + K8S string = "kubernetes" + OPENSHIFT string = "openshift" +) + +type Override struct{} diff --git a/pkg/offering/base/console/override/override_suite_test.go b/pkg/offering/base/console/override/override_suite_test.go new file mode 100644 index 00000000..fa47c9b8 --- /dev/null +++ b/pkg/offering/base/console/override/override_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestOverride(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Override Suite") +} diff --git a/pkg/offering/base/console/override/pvc.go b/pkg/offering/base/console/override/pvc.go new file mode 100644 index 00000000..7f3f08c3 --- /dev/null +++ b/pkg/offering/base/console/override/pvc.go @@ -0,0 +1,86 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override + +import ( + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (o *Override) PVC(object v1.Object, pvc *corev1.PersistentVolumeClaim, action resources.Action) error { + instance := object.(*current.IBPConsole) + switch action { + case resources.Create: + return o.CreatePVC(instance, pvc) + case resources.Update: + return o.UpdatePVC(instance, pvc) + } + + return nil +} + +func (o *Override) CreatePVC(instance *current.IBPConsole, pvc *corev1.PersistentVolumeClaim) error { + storage := instance.Spec.Storage + if storage != nil { + consoleStorage := storage.Console + if consoleStorage != nil { + if consoleStorage.Class != "" { + if consoleStorage.Class == "local" { + class := "manual" + pvc.Spec.StorageClassName = &class + } else { + pvc.Spec.StorageClassName = &consoleStorage.Class + } + } + + if consoleStorage.Size != "" { + quantity, err := resource.ParseQuantity(consoleStorage.Size) + if err != nil { + return err + } + resourceMap := pvc.Spec.Resources.Requests + if resourceMap == nil { + resourceMap = corev1.ResourceList{} + } + resourceMap[corev1.ResourceStorage] = quantity + pvc.Spec.Resources.Requests = resourceMap + } + } + } + + if pvc.ObjectMeta.Labels == nil { + pvc.ObjectMeta.Labels = map[string]string{} + } + if instance.Spec.Zone != "" { + pvc.ObjectMeta.Labels["zone"] = instance.Spec.Zone + } + + if instance.Spec.Region != "" { + pvc.ObjectMeta.Labels["region"] = instance.Spec.Region + } + + return nil +} + +func (o *Override) UpdatePVC(instance *current.IBPConsole, pvc *corev1.PersistentVolumeClaim) error { + return nil +} diff --git a/pkg/offering/base/console/override/pvc_test.go b/pkg/offering/base/console/override/pvc_test.go new file mode 100644 index 00000000..1c9895ca --- /dev/null +++ 
b/pkg/offering/base/console/override/pvc_test.go @@ -0,0 +1,99 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/console/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" +) + +var _ = Describe("Base Console PVC Overrides", func() { + var ( + overrider *override.Override + instance *current.IBPConsole + pvc *corev1.PersistentVolumeClaim + ) + + BeforeEach(func() { + var err error + + pvc, err = util.GetPVCFromFile("../../../../../definitions/console/pvc.yaml") + Expect(err).NotTo(HaveOccurred()) + + overrider = &override.Override{} + instance = &current.IBPConsole{ + Spec: current.IBPConsoleSpec{ + Zone: "zone1", + Region: "region1", + Storage: &current.ConsoleStorage{ + Console: &current.StorageSpec{ + Size: "100m", + Class: "manual", + }, + }, + }, + } + }) + + Context("create", func() { + It("overrides values based on spec", func() { + err := overrider.PVC(instance, pvc, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + By("setting storage class", func() { + Expect(pvc.Spec.StorageClassName).To(Equal(&instance.Spec.Storage.Console.Class)) + }) + + By("setting requested storage size", func() { + expectedRequests, err := resource.ParseQuantity(instance.Spec.Storage.Console.Size) + Expect(err).NotTo(HaveOccurred()) + Expect(pvc.Spec.Resources.Requests).To(Equal(corev1.ResourceList{corev1.ResourceStorage: expectedRequests})) + }) + + By("setting zone labels", func() { + Expect(pvc.ObjectMeta.Labels["zone"]).To(Equal(instance.Spec.Zone)) + }) + + By("setting region labels", func() { + Expect(pvc.ObjectMeta.Labels["region"]).To(Equal(instance.Spec.Region)) + }) + }) + + It("sets class to manual if spec used local", func() { + instance.Spec.Storage.Console.Class = "local" + err := overrider.PVC(instance, pvc, resources.Create) + Expect(err).NotTo(HaveOccurred()) + Expect(*pvc.Spec.StorageClassName).To(Equal("manual")) + }) + + It("returns an error if invalid value for size is used", func() { + instance.Spec.Storage.Console.Size = "10x" + err := overrider.PVC(instance, pvc, resources.Create) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("quantities must match the regular expression")) + }) + }) +}) diff --git a/pkg/offering/base/console/override/service.go b/pkg/offering/base/console/override/service.go new file mode 100644 index 00000000..547e6904 --- /dev/null +++ b/pkg/offering/base/console/override/service.go @@ -0,0 +1,74 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the
Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override + +import ( + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (o *Override) Service(object v1.Object, service *corev1.Service, action resources.Action) error { + instance := object.(*current.IBPConsole) + + switch action { + case resources.Create: + return o.CreateService(instance, service) + case resources.Update: + return o.UpdateService(instance, service) + } + + return nil +} + +func (o *Override) CreateService(instance *current.IBPConsole, service *corev1.Service) error { + err := o.CommonServiceOverride(instance, service) + if err != nil { + return err + } + + consolePort := instance.Spec.NetworkInfo.ConsolePort + if consolePort != 0 && instance.Spec.Service != nil && instance.Spec.Service.Type == corev1.ServiceTypeNodePort { + service.Spec.Ports[0].NodePort = consolePort + } + + return nil +} + +func (o *Override) UpdateService(instance *current.IBPConsole, service *corev1.Service) error { + return nil +} + +func (o *Override) CommonServiceOverride(instance *current.IBPConsole, service *corev1.Service) error { + if instance.Spec.Service != nil { + serviceType := instance.Spec.Service.Type + if serviceType != "" { + service.Spec.Type = serviceType + } + } + + // ensures a default value + if instance.Spec.Proxying == nil { + t := true + instance.Spec.Proxying = &t + } + + return nil +} diff --git a/pkg/offering/base/console/override/service_test.go b/pkg/offering/base/console/override/service_test.go new file mode 100644 index 00000000..56e7b93a --- /dev/null +++ b/pkg/offering/base/console/override/service_test.go @@ -0,0 +1,99 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + consolev1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/console/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/console/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" +) + +var _ = Describe("Base Console Service Overrides", func() { + var ( + overrider *override.Override + instance *current.IBPConsole + service *corev1.Service + ) + + BeforeEach(func() { + var err error + + service, err = util.GetServiceFromFile("../../../../../definitions/console/service.yaml") + Expect(err).NotTo(HaveOccurred()) + + overrider = &override.Override{} + instance = ¤t.IBPConsole{ + Spec: current.IBPConsoleSpec{ + Service: ¤t.Service{ + Type: corev1.ServiceTypeNodePort, + }, + NetworkInfo: ¤t.NetworkInfo{ + ConsolePort: 1234, + }, + }, + } + }) + + Context("create", func() { + It("overrides values based on spec", func() { + err := overrider.Service(instance, service, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + By("setting service type", func() { + Expect(service.Spec.Type).To(Equal(instance.Spec.Service.Type)) + }) + + By("instance spec proxying to ture", func() { + Expect(*instance.Spec.Proxying).To(Equal(true)) + }) + + By("setting console node port", func() { + Expect(service.Spec.Ports[0].NodePort).To(Equal(instance.Spec.NetworkInfo.ConsolePort)) + }) + }) + + It("overrides values based on spec with devmode on", func() { + instance.Spec.FeatureFlags = &consolev1.FeatureFlags{ + DevMode: true, + } + + err := overrider.Service(instance, service, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + By("setting service type", func() { + Expect(service.Spec.Type).To(Equal(instance.Spec.Service.Type)) + }) + + By("instance spec proxying to ture", func() { + Expect(*instance.Spec.Proxying).To(Equal(true)) + }) + + By("setting console node port", func() { + Expect(service.Spec.Ports[0].NodePort).To(Equal(instance.Spec.NetworkInfo.ConsolePort)) + }) + }) + + }) +}) diff --git a/pkg/offering/base/console/override/serviceaccount.go b/pkg/offering/base/console/override/serviceaccount.go new file mode 100644 index 00000000..254f5e5c --- /dev/null +++ b/pkg/offering/base/console/override/serviceaccount.go @@ -0,0 +1,58 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package override + +import ( + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (o *Override) ServiceAccount(object v1.Object, sa *corev1.ServiceAccount, action resources.Action) error { + instance := object.(*current.IBPConsole) + switch action { + case resources.Create: + return o.CreateServiceAccount(instance, sa) + case resources.Update: + return o.UpdateServiceAccount(instance, sa) + } + + return nil +} + +func (o *Override) CreateServiceAccount(instance *current.IBPConsole, sa *corev1.ServiceAccount) error { + return o.commonServiceAccount(instance, sa) +} + +func (o *Override) UpdateServiceAccount(instance *current.IBPConsole, sa *corev1.ServiceAccount) error { + return o.commonServiceAccount(instance, sa) +} + +func (o *Override) commonServiceAccount(instance *current.IBPConsole, sa *corev1.ServiceAccount) error { + for _, pullSecret := range instance.Spec.ImagePullSecrets { + imagePullSecret := corev1.LocalObjectReference{ + Name: pullSecret, + } + + sa.ImagePullSecrets = append(sa.ImagePullSecrets, imagePullSecret) + } + + return nil +} diff --git a/pkg/offering/base/console/override/serviceaccount_test.go b/pkg/offering/base/console/override/serviceaccount_test.go new file mode 100644 index 00000000..a9d18e96 --- /dev/null +++ b/pkg/offering/base/console/override/serviceaccount_test.go @@ -0,0 +1,82 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/console/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" +) + +var _ = Describe("Base Service Account Overrides", func() { + var ( + overrider *override.Override + instance *current.IBPConsole + sa *corev1.ServiceAccount + ) + + BeforeEach(func() { + var err error + + sa, err = util.GetServiceAccountFromFile("../../../../../definitions/console/serviceaccount.yaml") + Expect(err).NotTo(HaveOccurred()) + + overrider = &override.Override{} + instance = ¤t.IBPConsole{ + ObjectMeta: metav1.ObjectMeta{ + Name: "override1", + Namespace: "namespace1", + }, + Spec: current.IBPConsoleSpec{ + ImagePullSecrets: []string{"pullsecret1", "pullsecret2"}, + }, + } + }) + + Context("create", func() { + It("overrides values in service account, based on Console's instance spec", func() { + err := overrider.ServiceAccount(instance, sa, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + By("setting the image pull secret", func() { + Expect(sa.ImagePullSecrets[1].Name).To(Equal(instance.Spec.ImagePullSecrets[0])) + Expect(sa.ImagePullSecrets[2].Name).To(Equal(instance.Spec.ImagePullSecrets[1])) + }) + }) + + Context("update", func() { + It("overrides values in service account, based on Console's instance spec", func() { + err := overrider.ServiceAccount(instance, sa, resources.Update) + Expect(err).NotTo(HaveOccurred()) + + By("setting the image pull secret", func() { + Expect(sa.ImagePullSecrets[1].Name).To(Equal(instance.Spec.ImagePullSecrets[0])) + Expect(sa.ImagePullSecrets[2].Name).To(Equal(instance.Spec.ImagePullSecrets[1])) + }) + }) + }) + }) +}) diff --git a/pkg/offering/base/orderer/mocks/certificate_manager.go b/pkg/offering/base/orderer/mocks/certificate_manager.go new file mode 100644 index 00000000..ca071dde --- /dev/null +++ b/pkg/offering/base/orderer/mocks/certificate_manager.go @@ -0,0 +1,379 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package mocks + +import ( + "sync" + "time" + + "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + commona "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/certificate" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common" + baseorderer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type CertificateManager struct { + CheckCertificatesForExpireStub func(v1.Object, int64) (v1beta1.IBPCRStatusType, string, error) + checkCertificatesForExpireMutex sync.RWMutex + checkCertificatesForExpireArgsForCall []struct { + arg1 v1.Object + arg2 int64 + } + checkCertificatesForExpireReturns struct { + result1 v1beta1.IBPCRStatusType + result2 string + result3 error + } + checkCertificatesForExpireReturnsOnCall map[int]struct { + result1 v1beta1.IBPCRStatusType + result2 string + result3 error + } + GetDurationToNextRenewalStub func(common.SecretType, v1.Object, int64) (time.Duration, error) + getDurationToNextRenewalMutex sync.RWMutex + getDurationToNextRenewalArgsForCall []struct { + arg1 common.SecretType + arg2 v1.Object + arg3 int64 + } + getDurationToNextRenewalReturns struct { + result1 time.Duration + result2 error + } + getDurationToNextRenewalReturnsOnCall map[int]struct { + result1 time.Duration + result2 error + } + GetSignCertStub func(string, string) ([]byte, error) + getSignCertMutex sync.RWMutex + getSignCertArgsForCall []struct { + arg1 string + arg2 string + } + getSignCertReturns struct { + result1 []byte + result2 error + } + getSignCertReturnsOnCall map[int]struct { + result1 []byte + result2 error + } + RenewCertStub func(common.SecretType, certificate.Instance, *v1beta1.EnrollmentSpec, *commona.BCCSP, string, bool, bool) error + renewCertMutex sync.RWMutex + renewCertArgsForCall []struct { + arg1 common.SecretType + arg2 certificate.Instance + arg3 *v1beta1.EnrollmentSpec + arg4 *commona.BCCSP + arg5 string + arg6 bool + arg7 bool + } + renewCertReturns struct { + result1 error + } + renewCertReturnsOnCall map[int]struct { + result1 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *CertificateManager) CheckCertificatesForExpire(arg1 v1.Object, arg2 int64) (v1beta1.IBPCRStatusType, string, error) { + fake.checkCertificatesForExpireMutex.Lock() + ret, specificReturn := fake.checkCertificatesForExpireReturnsOnCall[len(fake.checkCertificatesForExpireArgsForCall)] + fake.checkCertificatesForExpireArgsForCall = append(fake.checkCertificatesForExpireArgsForCall, struct { + arg1 v1.Object + arg2 int64 + }{arg1, arg2}) + stub := fake.CheckCertificatesForExpireStub + fakeReturns := fake.checkCertificatesForExpireReturns + fake.recordInvocation("CheckCertificatesForExpire", []interface{}{arg1, arg2}) + fake.checkCertificatesForExpireMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1, ret.result2, ret.result3 + } + return fakeReturns.result1, fakeReturns.result2, fakeReturns.result3 +} + +func (fake *CertificateManager) CheckCertificatesForExpireCallCount() int { + fake.checkCertificatesForExpireMutex.RLock() + defer fake.checkCertificatesForExpireMutex.RUnlock() + return len(fake.checkCertificatesForExpireArgsForCall) +} + +func (fake *CertificateManager) CheckCertificatesForExpireCalls(stub func(v1.Object, int64) (v1beta1.IBPCRStatusType, string, error)) { + fake.checkCertificatesForExpireMutex.Lock() + defer 
fake.checkCertificatesForExpireMutex.Unlock() + fake.CheckCertificatesForExpireStub = stub +} + +func (fake *CertificateManager) CheckCertificatesForExpireArgsForCall(i int) (v1.Object, int64) { + fake.checkCertificatesForExpireMutex.RLock() + defer fake.checkCertificatesForExpireMutex.RUnlock() + argsForCall := fake.checkCertificatesForExpireArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *CertificateManager) CheckCertificatesForExpireReturns(result1 v1beta1.IBPCRStatusType, result2 string, result3 error) { + fake.checkCertificatesForExpireMutex.Lock() + defer fake.checkCertificatesForExpireMutex.Unlock() + fake.CheckCertificatesForExpireStub = nil + fake.checkCertificatesForExpireReturns = struct { + result1 v1beta1.IBPCRStatusType + result2 string + result3 error + }{result1, result2, result3} +} + +func (fake *CertificateManager) CheckCertificatesForExpireReturnsOnCall(i int, result1 v1beta1.IBPCRStatusType, result2 string, result3 error) { + fake.checkCertificatesForExpireMutex.Lock() + defer fake.checkCertificatesForExpireMutex.Unlock() + fake.CheckCertificatesForExpireStub = nil + if fake.checkCertificatesForExpireReturnsOnCall == nil { + fake.checkCertificatesForExpireReturnsOnCall = make(map[int]struct { + result1 v1beta1.IBPCRStatusType + result2 string + result3 error + }) + } + fake.checkCertificatesForExpireReturnsOnCall[i] = struct { + result1 v1beta1.IBPCRStatusType + result2 string + result3 error + }{result1, result2, result3} +} + +func (fake *CertificateManager) GetDurationToNextRenewal(arg1 common.SecretType, arg2 v1.Object, arg3 int64) (time.Duration, error) { + fake.getDurationToNextRenewalMutex.Lock() + ret, specificReturn := fake.getDurationToNextRenewalReturnsOnCall[len(fake.getDurationToNextRenewalArgsForCall)] + fake.getDurationToNextRenewalArgsForCall = append(fake.getDurationToNextRenewalArgsForCall, struct { + arg1 common.SecretType + arg2 v1.Object + arg3 int64 + }{arg1, arg2, arg3}) + stub := fake.GetDurationToNextRenewalStub + fakeReturns := fake.getDurationToNextRenewalReturns + fake.recordInvocation("GetDurationToNextRenewal", []interface{}{arg1, arg2, arg3}) + fake.getDurationToNextRenewalMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *CertificateManager) GetDurationToNextRenewalCallCount() int { + fake.getDurationToNextRenewalMutex.RLock() + defer fake.getDurationToNextRenewalMutex.RUnlock() + return len(fake.getDurationToNextRenewalArgsForCall) +} + +func (fake *CertificateManager) GetDurationToNextRenewalCalls(stub func(common.SecretType, v1.Object, int64) (time.Duration, error)) { + fake.getDurationToNextRenewalMutex.Lock() + defer fake.getDurationToNextRenewalMutex.Unlock() + fake.GetDurationToNextRenewalStub = stub +} + +func (fake *CertificateManager) GetDurationToNextRenewalArgsForCall(i int) (common.SecretType, v1.Object, int64) { + fake.getDurationToNextRenewalMutex.RLock() + defer fake.getDurationToNextRenewalMutex.RUnlock() + argsForCall := fake.getDurationToNextRenewalArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *CertificateManager) GetDurationToNextRenewalReturns(result1 time.Duration, result2 error) { + fake.getDurationToNextRenewalMutex.Lock() + defer fake.getDurationToNextRenewalMutex.Unlock() + fake.GetDurationToNextRenewalStub = nil + fake.getDurationToNextRenewalReturns = struct { + result1 time.Duration + 
result2 error + }{result1, result2} +} + +func (fake *CertificateManager) GetDurationToNextRenewalReturnsOnCall(i int, result1 time.Duration, result2 error) { + fake.getDurationToNextRenewalMutex.Lock() + defer fake.getDurationToNextRenewalMutex.Unlock() + fake.GetDurationToNextRenewalStub = nil + if fake.getDurationToNextRenewalReturnsOnCall == nil { + fake.getDurationToNextRenewalReturnsOnCall = make(map[int]struct { + result1 time.Duration + result2 error + }) + } + fake.getDurationToNextRenewalReturnsOnCall[i] = struct { + result1 time.Duration + result2 error + }{result1, result2} +} + +func (fake *CertificateManager) GetSignCert(arg1 string, arg2 string) ([]byte, error) { + fake.getSignCertMutex.Lock() + ret, specificReturn := fake.getSignCertReturnsOnCall[len(fake.getSignCertArgsForCall)] + fake.getSignCertArgsForCall = append(fake.getSignCertArgsForCall, struct { + arg1 string + arg2 string + }{arg1, arg2}) + stub := fake.GetSignCertStub + fakeReturns := fake.getSignCertReturns + fake.recordInvocation("GetSignCert", []interface{}{arg1, arg2}) + fake.getSignCertMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *CertificateManager) GetSignCertCallCount() int { + fake.getSignCertMutex.RLock() + defer fake.getSignCertMutex.RUnlock() + return len(fake.getSignCertArgsForCall) +} + +func (fake *CertificateManager) GetSignCertCalls(stub func(string, string) ([]byte, error)) { + fake.getSignCertMutex.Lock() + defer fake.getSignCertMutex.Unlock() + fake.GetSignCertStub = stub +} + +func (fake *CertificateManager) GetSignCertArgsForCall(i int) (string, string) { + fake.getSignCertMutex.RLock() + defer fake.getSignCertMutex.RUnlock() + argsForCall := fake.getSignCertArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *CertificateManager) GetSignCertReturns(result1 []byte, result2 error) { + fake.getSignCertMutex.Lock() + defer fake.getSignCertMutex.Unlock() + fake.GetSignCertStub = nil + fake.getSignCertReturns = struct { + result1 []byte + result2 error + }{result1, result2} +} + +func (fake *CertificateManager) GetSignCertReturnsOnCall(i int, result1 []byte, result2 error) { + fake.getSignCertMutex.Lock() + defer fake.getSignCertMutex.Unlock() + fake.GetSignCertStub = nil + if fake.getSignCertReturnsOnCall == nil { + fake.getSignCertReturnsOnCall = make(map[int]struct { + result1 []byte + result2 error + }) + } + fake.getSignCertReturnsOnCall[i] = struct { + result1 []byte + result2 error + }{result1, result2} +} + +func (fake *CertificateManager) RenewCert(arg1 common.SecretType, arg2 certificate.Instance, arg3 *v1beta1.EnrollmentSpec, arg4 *commona.BCCSP, arg5 string, arg6 bool, arg7 bool) error { + fake.renewCertMutex.Lock() + ret, specificReturn := fake.renewCertReturnsOnCall[len(fake.renewCertArgsForCall)] + fake.renewCertArgsForCall = append(fake.renewCertArgsForCall, struct { + arg1 common.SecretType + arg2 certificate.Instance + arg3 *v1beta1.EnrollmentSpec + arg4 *commona.BCCSP + arg5 string + arg6 bool + arg7 bool + }{arg1, arg2, arg3, arg4, arg5, arg6, arg7}) + stub := fake.RenewCertStub + fakeReturns := fake.renewCertReturns + fake.recordInvocation("RenewCert", []interface{}{arg1, arg2, arg3, arg4, arg5, arg6, arg7}) + fake.renewCertMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3, arg4, arg5, arg6, arg7) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake 
*CertificateManager) RenewCertCallCount() int { + fake.renewCertMutex.RLock() + defer fake.renewCertMutex.RUnlock() + return len(fake.renewCertArgsForCall) +} + +func (fake *CertificateManager) RenewCertCalls(stub func(common.SecretType, certificate.Instance, *v1beta1.EnrollmentSpec, *commona.BCCSP, string, bool, bool) error) { + fake.renewCertMutex.Lock() + defer fake.renewCertMutex.Unlock() + fake.RenewCertStub = stub +} + +func (fake *CertificateManager) RenewCertArgsForCall(i int) (common.SecretType, certificate.Instance, *v1beta1.EnrollmentSpec, *commona.BCCSP, string, bool, bool) { + fake.renewCertMutex.RLock() + defer fake.renewCertMutex.RUnlock() + argsForCall := fake.renewCertArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5, argsForCall.arg6, argsForCall.arg7 +} + +func (fake *CertificateManager) RenewCertReturns(result1 error) { + fake.renewCertMutex.Lock() + defer fake.renewCertMutex.Unlock() + fake.RenewCertStub = nil + fake.renewCertReturns = struct { + result1 error + }{result1} +} + +func (fake *CertificateManager) RenewCertReturnsOnCall(i int, result1 error) { + fake.renewCertMutex.Lock() + defer fake.renewCertMutex.Unlock() + fake.RenewCertStub = nil + if fake.renewCertReturnsOnCall == nil { + fake.renewCertReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.renewCertReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *CertificateManager) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.checkCertificatesForExpireMutex.RLock() + defer fake.checkCertificatesForExpireMutex.RUnlock() + fake.getDurationToNextRenewalMutex.RLock() + defer fake.getDurationToNextRenewalMutex.RUnlock() + fake.getSignCertMutex.RLock() + defer fake.getSignCertMutex.RUnlock() + fake.renewCertMutex.RLock() + defer fake.renewCertMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *CertificateManager) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ baseorderer.CertificateManager = new(CertificateManager) diff --git a/pkg/offering/base/orderer/mocks/deployment_manager.go b/pkg/offering/base/orderer/mocks/deployment_manager.go new file mode 100644 index 00000000..287278d3 --- /dev/null +++ b/pkg/offering/base/orderer/mocks/deployment_manager.go @@ -0,0 +1,682 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package mocks + +import ( + "sync" + + baseorderer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer" + v1a "k8s.io/api/apps/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type DeploymentManager struct { + CheckForSecretChangeStub func(v1.Object, string, func(string, *v1a.Deployment) bool) error + checkForSecretChangeMutex sync.RWMutex + checkForSecretChangeArgsForCall []struct { + arg1 v1.Object + arg2 string + arg3 func(string, *v1a.Deployment) bool + } + checkForSecretChangeReturns struct { + result1 error + } + checkForSecretChangeReturnsOnCall map[int]struct { + result1 error + } + CheckStateStub func(v1.Object) error + checkStateMutex sync.RWMutex + checkStateArgsForCall []struct { + arg1 v1.Object + } + checkStateReturns struct { + result1 error + } + checkStateReturnsOnCall map[int]struct { + result1 error + } + DeleteStub func(v1.Object) error + deleteMutex sync.RWMutex + deleteArgsForCall []struct { + arg1 v1.Object + } + deleteReturns struct { + result1 error + } + deleteReturnsOnCall map[int]struct { + result1 error + } + ExistsStub func(v1.Object) bool + existsMutex sync.RWMutex + existsArgsForCall []struct { + arg1 v1.Object + } + existsReturns struct { + result1 bool + } + existsReturnsOnCall map[int]struct { + result1 bool + } + GetStub func(v1.Object) (client.Object, error) + getMutex sync.RWMutex + getArgsForCall []struct { + arg1 v1.Object + } + getReturns struct { + result1 client.Object + result2 error + } + getReturnsOnCall map[int]struct { + result1 client.Object + result2 error + } + GetNameStub func(v1.Object) string + getNameMutex sync.RWMutex + getNameArgsForCall []struct { + arg1 v1.Object + } + getNameReturns struct { + result1 string + } + getNameReturnsOnCall map[int]struct { + result1 string + } + ReconcileStub func(v1.Object, bool) error + reconcileMutex sync.RWMutex + reconcileArgsForCall []struct { + arg1 v1.Object + arg2 bool + } + reconcileReturns struct { + result1 error + } + reconcileReturnsOnCall map[int]struct { + result1 error + } + RestoreStateStub func(v1.Object) error + restoreStateMutex sync.RWMutex + restoreStateArgsForCall []struct { + arg1 v1.Object + } + restoreStateReturns struct { + result1 error + } + restoreStateReturnsOnCall map[int]struct { + result1 error + } + SetCustomNameStub func(string) + setCustomNameMutex sync.RWMutex + setCustomNameArgsForCall []struct { + arg1 string + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *DeploymentManager) CheckForSecretChange(arg1 v1.Object, arg2 string, arg3 func(string, *v1a.Deployment) bool) error { + fake.checkForSecretChangeMutex.Lock() + ret, specificReturn := fake.checkForSecretChangeReturnsOnCall[len(fake.checkForSecretChangeArgsForCall)] + fake.checkForSecretChangeArgsForCall = append(fake.checkForSecretChangeArgsForCall, struct { + arg1 v1.Object + arg2 string + arg3 func(string, *v1a.Deployment) bool + }{arg1, arg2, arg3}) + stub := fake.CheckForSecretChangeStub + fakeReturns := fake.checkForSecretChangeReturns + fake.recordInvocation("CheckForSecretChange", []interface{}{arg1, arg2, arg3}) + fake.checkForSecretChangeMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *DeploymentManager) CheckForSecretChangeCallCount() int { + fake.checkForSecretChangeMutex.RLock() + defer fake.checkForSecretChangeMutex.RUnlock() + return 
len(fake.checkForSecretChangeArgsForCall) +} + +func (fake *DeploymentManager) CheckForSecretChangeCalls(stub func(v1.Object, string, func(string, *v1a.Deployment) bool) error) { + fake.checkForSecretChangeMutex.Lock() + defer fake.checkForSecretChangeMutex.Unlock() + fake.CheckForSecretChangeStub = stub +} + +func (fake *DeploymentManager) CheckForSecretChangeArgsForCall(i int) (v1.Object, string, func(string, *v1a.Deployment) bool) { + fake.checkForSecretChangeMutex.RLock() + defer fake.checkForSecretChangeMutex.RUnlock() + argsForCall := fake.checkForSecretChangeArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *DeploymentManager) CheckForSecretChangeReturns(result1 error) { + fake.checkForSecretChangeMutex.Lock() + defer fake.checkForSecretChangeMutex.Unlock() + fake.CheckForSecretChangeStub = nil + fake.checkForSecretChangeReturns = struct { + result1 error + }{result1} +} + +func (fake *DeploymentManager) CheckForSecretChangeReturnsOnCall(i int, result1 error) { + fake.checkForSecretChangeMutex.Lock() + defer fake.checkForSecretChangeMutex.Unlock() + fake.CheckForSecretChangeStub = nil + if fake.checkForSecretChangeReturnsOnCall == nil { + fake.checkForSecretChangeReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.checkForSecretChangeReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *DeploymentManager) CheckState(arg1 v1.Object) error { + fake.checkStateMutex.Lock() + ret, specificReturn := fake.checkStateReturnsOnCall[len(fake.checkStateArgsForCall)] + fake.checkStateArgsForCall = append(fake.checkStateArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.CheckStateStub + fakeReturns := fake.checkStateReturns + fake.recordInvocation("CheckState", []interface{}{arg1}) + fake.checkStateMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *DeploymentManager) CheckStateCallCount() int { + fake.checkStateMutex.RLock() + defer fake.checkStateMutex.RUnlock() + return len(fake.checkStateArgsForCall) +} + +func (fake *DeploymentManager) CheckStateCalls(stub func(v1.Object) error) { + fake.checkStateMutex.Lock() + defer fake.checkStateMutex.Unlock() + fake.CheckStateStub = stub +} + +func (fake *DeploymentManager) CheckStateArgsForCall(i int) v1.Object { + fake.checkStateMutex.RLock() + defer fake.checkStateMutex.RUnlock() + argsForCall := fake.checkStateArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *DeploymentManager) CheckStateReturns(result1 error) { + fake.checkStateMutex.Lock() + defer fake.checkStateMutex.Unlock() + fake.CheckStateStub = nil + fake.checkStateReturns = struct { + result1 error + }{result1} +} + +func (fake *DeploymentManager) CheckStateReturnsOnCall(i int, result1 error) { + fake.checkStateMutex.Lock() + defer fake.checkStateMutex.Unlock() + fake.CheckStateStub = nil + if fake.checkStateReturnsOnCall == nil { + fake.checkStateReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.checkStateReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *DeploymentManager) Delete(arg1 v1.Object) error { + fake.deleteMutex.Lock() + ret, specificReturn := fake.deleteReturnsOnCall[len(fake.deleteArgsForCall)] + fake.deleteArgsForCall = append(fake.deleteArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.DeleteStub + fakeReturns := fake.deleteReturns + fake.recordInvocation("Delete", []interface{}{arg1}) + fake.deleteMutex.Unlock() + 
if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *DeploymentManager) DeleteCallCount() int { + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + return len(fake.deleteArgsForCall) +} + +func (fake *DeploymentManager) DeleteCalls(stub func(v1.Object) error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = stub +} + +func (fake *DeploymentManager) DeleteArgsForCall(i int) v1.Object { + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + argsForCall := fake.deleteArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *DeploymentManager) DeleteReturns(result1 error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = nil + fake.deleteReturns = struct { + result1 error + }{result1} +} + +func (fake *DeploymentManager) DeleteReturnsOnCall(i int, result1 error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = nil + if fake.deleteReturnsOnCall == nil { + fake.deleteReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.deleteReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *DeploymentManager) Exists(arg1 v1.Object) bool { + fake.existsMutex.Lock() + ret, specificReturn := fake.existsReturnsOnCall[len(fake.existsArgsForCall)] + fake.existsArgsForCall = append(fake.existsArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.ExistsStub + fakeReturns := fake.existsReturns + fake.recordInvocation("Exists", []interface{}{arg1}) + fake.existsMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *DeploymentManager) ExistsCallCount() int { + fake.existsMutex.RLock() + defer fake.existsMutex.RUnlock() + return len(fake.existsArgsForCall) +} + +func (fake *DeploymentManager) ExistsCalls(stub func(v1.Object) bool) { + fake.existsMutex.Lock() + defer fake.existsMutex.Unlock() + fake.ExistsStub = stub +} + +func (fake *DeploymentManager) ExistsArgsForCall(i int) v1.Object { + fake.existsMutex.RLock() + defer fake.existsMutex.RUnlock() + argsForCall := fake.existsArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *DeploymentManager) ExistsReturns(result1 bool) { + fake.existsMutex.Lock() + defer fake.existsMutex.Unlock() + fake.ExistsStub = nil + fake.existsReturns = struct { + result1 bool + }{result1} +} + +func (fake *DeploymentManager) ExistsReturnsOnCall(i int, result1 bool) { + fake.existsMutex.Lock() + defer fake.existsMutex.Unlock() + fake.ExistsStub = nil + if fake.existsReturnsOnCall == nil { + fake.existsReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.existsReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *DeploymentManager) Get(arg1 v1.Object) (client.Object, error) { + fake.getMutex.Lock() + ret, specificReturn := fake.getReturnsOnCall[len(fake.getArgsForCall)] + fake.getArgsForCall = append(fake.getArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.GetStub + fakeReturns := fake.getReturns + fake.recordInvocation("Get", []interface{}{arg1}) + fake.getMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *DeploymentManager) GetCallCount() int { + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + return len(fake.getArgsForCall) +} + +func (fake 
*DeploymentManager) GetCalls(stub func(v1.Object) (client.Object, error)) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = stub +} + +func (fake *DeploymentManager) GetArgsForCall(i int) v1.Object { + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + argsForCall := fake.getArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *DeploymentManager) GetReturns(result1 client.Object, result2 error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = nil + fake.getReturns = struct { + result1 client.Object + result2 error + }{result1, result2} +} + +func (fake *DeploymentManager) GetReturnsOnCall(i int, result1 client.Object, result2 error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = nil + if fake.getReturnsOnCall == nil { + fake.getReturnsOnCall = make(map[int]struct { + result1 client.Object + result2 error + }) + } + fake.getReturnsOnCall[i] = struct { + result1 client.Object + result2 error + }{result1, result2} +} + +func (fake *DeploymentManager) GetName(arg1 v1.Object) string { + fake.getNameMutex.Lock() + ret, specificReturn := fake.getNameReturnsOnCall[len(fake.getNameArgsForCall)] + fake.getNameArgsForCall = append(fake.getNameArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.GetNameStub + fakeReturns := fake.getNameReturns + fake.recordInvocation("GetName", []interface{}{arg1}) + fake.getNameMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *DeploymentManager) GetNameCallCount() int { + fake.getNameMutex.RLock() + defer fake.getNameMutex.RUnlock() + return len(fake.getNameArgsForCall) +} + +func (fake *DeploymentManager) GetNameCalls(stub func(v1.Object) string) { + fake.getNameMutex.Lock() + defer fake.getNameMutex.Unlock() + fake.GetNameStub = stub +} + +func (fake *DeploymentManager) GetNameArgsForCall(i int) v1.Object { + fake.getNameMutex.RLock() + defer fake.getNameMutex.RUnlock() + argsForCall := fake.getNameArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *DeploymentManager) GetNameReturns(result1 string) { + fake.getNameMutex.Lock() + defer fake.getNameMutex.Unlock() + fake.GetNameStub = nil + fake.getNameReturns = struct { + result1 string + }{result1} +} + +func (fake *DeploymentManager) GetNameReturnsOnCall(i int, result1 string) { + fake.getNameMutex.Lock() + defer fake.getNameMutex.Unlock() + fake.GetNameStub = nil + if fake.getNameReturnsOnCall == nil { + fake.getNameReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getNameReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *DeploymentManager) Reconcile(arg1 v1.Object, arg2 bool) error { + fake.reconcileMutex.Lock() + ret, specificReturn := fake.reconcileReturnsOnCall[len(fake.reconcileArgsForCall)] + fake.reconcileArgsForCall = append(fake.reconcileArgsForCall, struct { + arg1 v1.Object + arg2 bool + }{arg1, arg2}) + stub := fake.ReconcileStub + fakeReturns := fake.reconcileReturns + fake.recordInvocation("Reconcile", []interface{}{arg1, arg2}) + fake.reconcileMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *DeploymentManager) ReconcileCallCount() int { + fake.reconcileMutex.RLock() + defer fake.reconcileMutex.RUnlock() + return len(fake.reconcileArgsForCall) +} + +func (fake *DeploymentManager) ReconcileCalls(stub func(v1.Object, bool) error) { + 
fake.reconcileMutex.Lock() + defer fake.reconcileMutex.Unlock() + fake.ReconcileStub = stub +} + +func (fake *DeploymentManager) ReconcileArgsForCall(i int) (v1.Object, bool) { + fake.reconcileMutex.RLock() + defer fake.reconcileMutex.RUnlock() + argsForCall := fake.reconcileArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *DeploymentManager) ReconcileReturns(result1 error) { + fake.reconcileMutex.Lock() + defer fake.reconcileMutex.Unlock() + fake.ReconcileStub = nil + fake.reconcileReturns = struct { + result1 error + }{result1} +} + +func (fake *DeploymentManager) ReconcileReturnsOnCall(i int, result1 error) { + fake.reconcileMutex.Lock() + defer fake.reconcileMutex.Unlock() + fake.ReconcileStub = nil + if fake.reconcileReturnsOnCall == nil { + fake.reconcileReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.reconcileReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *DeploymentManager) RestoreState(arg1 v1.Object) error { + fake.restoreStateMutex.Lock() + ret, specificReturn := fake.restoreStateReturnsOnCall[len(fake.restoreStateArgsForCall)] + fake.restoreStateArgsForCall = append(fake.restoreStateArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.RestoreStateStub + fakeReturns := fake.restoreStateReturns + fake.recordInvocation("RestoreState", []interface{}{arg1}) + fake.restoreStateMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *DeploymentManager) RestoreStateCallCount() int { + fake.restoreStateMutex.RLock() + defer fake.restoreStateMutex.RUnlock() + return len(fake.restoreStateArgsForCall) +} + +func (fake *DeploymentManager) RestoreStateCalls(stub func(v1.Object) error) { + fake.restoreStateMutex.Lock() + defer fake.restoreStateMutex.Unlock() + fake.RestoreStateStub = stub +} + +func (fake *DeploymentManager) RestoreStateArgsForCall(i int) v1.Object { + fake.restoreStateMutex.RLock() + defer fake.restoreStateMutex.RUnlock() + argsForCall := fake.restoreStateArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *DeploymentManager) RestoreStateReturns(result1 error) { + fake.restoreStateMutex.Lock() + defer fake.restoreStateMutex.Unlock() + fake.RestoreStateStub = nil + fake.restoreStateReturns = struct { + result1 error + }{result1} +} + +func (fake *DeploymentManager) RestoreStateReturnsOnCall(i int, result1 error) { + fake.restoreStateMutex.Lock() + defer fake.restoreStateMutex.Unlock() + fake.RestoreStateStub = nil + if fake.restoreStateReturnsOnCall == nil { + fake.restoreStateReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.restoreStateReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *DeploymentManager) SetCustomName(arg1 string) { + fake.setCustomNameMutex.Lock() + fake.setCustomNameArgsForCall = append(fake.setCustomNameArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetCustomNameStub + fake.recordInvocation("SetCustomName", []interface{}{arg1}) + fake.setCustomNameMutex.Unlock() + if stub != nil { + fake.SetCustomNameStub(arg1) + } +} + +func (fake *DeploymentManager) SetCustomNameCallCount() int { + fake.setCustomNameMutex.RLock() + defer fake.setCustomNameMutex.RUnlock() + return len(fake.setCustomNameArgsForCall) +} + +func (fake *DeploymentManager) SetCustomNameCalls(stub func(string)) { + fake.setCustomNameMutex.Lock() + defer fake.setCustomNameMutex.Unlock() + fake.SetCustomNameStub = stub +} + +func (fake 
*DeploymentManager) SetCustomNameArgsForCall(i int) string { + fake.setCustomNameMutex.RLock() + defer fake.setCustomNameMutex.RUnlock() + argsForCall := fake.setCustomNameArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *DeploymentManager) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.checkForSecretChangeMutex.RLock() + defer fake.checkForSecretChangeMutex.RUnlock() + fake.checkStateMutex.RLock() + defer fake.checkStateMutex.RUnlock() + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + fake.existsMutex.RLock() + defer fake.existsMutex.RUnlock() + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + fake.getNameMutex.RLock() + defer fake.getNameMutex.RUnlock() + fake.reconcileMutex.RLock() + defer fake.reconcileMutex.RUnlock() + fake.restoreStateMutex.RLock() + defer fake.restoreStateMutex.RUnlock() + fake.setCustomNameMutex.RLock() + defer fake.setCustomNameMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *DeploymentManager) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ baseorderer.DeploymentManager = new(DeploymentManager) diff --git a/pkg/offering/base/orderer/mocks/initializeibporderer.go b/pkg/offering/base/orderer/mocks/initializeibporderer.go new file mode 100644 index 00000000..b5f6baab --- /dev/null +++ b/pkg/offering/base/orderer/mocks/initializeibporderer.go @@ -0,0 +1,1376 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package mocks + +import ( + "sync" + + "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + initializer "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer" + baseorderer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer" + v1 "k8s.io/api/core/v1" +) + +type InitializeIBPOrderer struct { + CheckIfAdminCertsUpdatedStub func(*v1beta1.IBPOrderer) (bool, error) + checkIfAdminCertsUpdatedMutex sync.RWMutex + checkIfAdminCertsUpdatedArgsForCall []struct { + arg1 *v1beta1.IBPOrderer + } + checkIfAdminCertsUpdatedReturns struct { + result1 bool + result2 error + } + checkIfAdminCertsUpdatedReturnsOnCall map[int]struct { + result1 bool + result2 error + } + CreateStub func(initializer.OrdererConfig, initializer.IBPOrderer, string) (*initializer.Response, error) + createMutex sync.RWMutex + createArgsForCall []struct { + arg1 initializer.OrdererConfig + arg2 initializer.IBPOrderer + arg3 string + } + createReturns struct { + result1 *initializer.Response + result2 error + } + createReturnsOnCall map[int]struct { + result1 *initializer.Response + result2 error + } + CreateOrUpdateConfigMapStub func(*v1beta1.IBPOrderer, initializer.OrdererConfig) error + createOrUpdateConfigMapMutex sync.RWMutex + createOrUpdateConfigMapArgsForCall []struct { + arg1 *v1beta1.IBPOrderer + arg2 initializer.OrdererConfig + } + createOrUpdateConfigMapReturns struct { + result1 error + } + createOrUpdateConfigMapReturnsOnCall map[int]struct { + result1 error + } + DeleteStub func(*v1beta1.IBPOrderer) error + deleteMutex sync.RWMutex + deleteArgsForCall []struct { + arg1 *v1beta1.IBPOrderer + } + deleteReturns struct { + result1 error + } + deleteReturnsOnCall map[int]struct { + result1 error + } + GenerateSecretsStub func(common.SecretType, *v1beta1.IBPOrderer, *config.Response) error + generateSecretsMutex sync.RWMutex + generateSecretsArgsForCall []struct { + arg1 common.SecretType + arg2 *v1beta1.IBPOrderer + arg3 *config.Response + } + generateSecretsReturns struct { + result1 error + } + generateSecretsReturnsOnCall map[int]struct { + result1 error + } + GenerateSecretsFromResponseStub func(*v1beta1.IBPOrderer, *config.CryptoResponse) error + generateSecretsFromResponseMutex sync.RWMutex + generateSecretsFromResponseArgsForCall []struct { + arg1 *v1beta1.IBPOrderer + arg2 *config.CryptoResponse + } + generateSecretsFromResponseReturns struct { + result1 error + } + generateSecretsFromResponseReturnsOnCall map[int]struct { + result1 error + } + GetConfigFromConfigMapStub func(*v1beta1.IBPOrderer) (*v1.ConfigMap, error) + getConfigFromConfigMapMutex sync.RWMutex + getConfigFromConfigMapArgsForCall []struct { + arg1 *v1beta1.IBPOrderer + } + getConfigFromConfigMapReturns struct { + result1 *v1.ConfigMap + result2 error + } + getConfigFromConfigMapReturnsOnCall map[int]struct { + result1 *v1.ConfigMap + result2 error + } + GetCoreConfigFromBytesStub func(*v1beta1.IBPOrderer, []byte) (initializer.OrdererConfig, error) + getCoreConfigFromBytesMutex sync.RWMutex + getCoreConfigFromBytesArgsForCall []struct { + arg1 *v1beta1.IBPOrderer + arg2 []byte + } + getCoreConfigFromBytesReturns struct { + result1 initializer.OrdererConfig + result2 error + } + getCoreConfigFromBytesReturnsOnCall map[int]struct { + result1 initializer.OrdererConfig + result2 error + } + GetCoreConfigFromFileStub func(*v1beta1.IBPOrderer, string) 
(initializer.OrdererConfig, error) + getCoreConfigFromFileMutex sync.RWMutex + getCoreConfigFromFileArgsForCall []struct { + arg1 *v1beta1.IBPOrderer + arg2 string + } + getCoreConfigFromFileReturns struct { + result1 initializer.OrdererConfig + result2 error + } + getCoreConfigFromFileReturnsOnCall map[int]struct { + result1 initializer.OrdererConfig + result2 error + } + GetCryptoStub func(*v1beta1.IBPOrderer) (*config.CryptoResponse, error) + getCryptoMutex sync.RWMutex + getCryptoArgsForCall []struct { + arg1 *v1beta1.IBPOrderer + } + getCryptoReturns struct { + result1 *config.CryptoResponse + result2 error + } + getCryptoReturnsOnCall map[int]struct { + result1 *config.CryptoResponse + result2 error + } + GetInitOrdererStub func(*v1beta1.IBPOrderer, string) (*initializer.Orderer, error) + getInitOrdererMutex sync.RWMutex + getInitOrdererArgsForCall []struct { + arg1 *v1beta1.IBPOrderer + arg2 string + } + getInitOrdererReturns struct { + result1 *initializer.Orderer + result2 error + } + getInitOrdererReturnsOnCall map[int]struct { + result1 *initializer.Orderer + result2 error + } + GetUpdatedOrdererStub func(*v1beta1.IBPOrderer) (*initializer.Orderer, error) + getUpdatedOrdererMutex sync.RWMutex + getUpdatedOrdererArgsForCall []struct { + arg1 *v1beta1.IBPOrderer + } + getUpdatedOrdererReturns struct { + result1 *initializer.Orderer + result2 error + } + getUpdatedOrdererReturnsOnCall map[int]struct { + result1 *initializer.Orderer + result2 error + } + MissingCryptoStub func(*v1beta1.IBPOrderer) bool + missingCryptoMutex sync.RWMutex + missingCryptoArgsForCall []struct { + arg1 *v1beta1.IBPOrderer + } + missingCryptoReturns struct { + result1 bool + } + missingCryptoReturnsOnCall map[int]struct { + result1 bool + } + UpdateStub func(initializer.OrdererConfig, initializer.IBPOrderer) (*initializer.Response, error) + updateMutex sync.RWMutex + updateArgsForCall []struct { + arg1 initializer.OrdererConfig + arg2 initializer.IBPOrderer + } + updateReturns struct { + result1 *initializer.Response + result2 error + } + updateReturnsOnCall map[int]struct { + result1 *initializer.Response + result2 error + } + UpdateAdminSecretStub func(*v1beta1.IBPOrderer) error + updateAdminSecretMutex sync.RWMutex + updateAdminSecretArgsForCall []struct { + arg1 *v1beta1.IBPOrderer + } + updateAdminSecretReturns struct { + result1 error + } + updateAdminSecretReturnsOnCall map[int]struct { + result1 error + } + UpdateSecretsStub func(common.SecretType, *v1beta1.IBPOrderer, *config.Response) error + updateSecretsMutex sync.RWMutex + updateSecretsArgsForCall []struct { + arg1 common.SecretType + arg2 *v1beta1.IBPOrderer + arg3 *config.Response + } + updateSecretsReturns struct { + result1 error + } + updateSecretsReturnsOnCall map[int]struct { + result1 error + } + UpdateSecretsFromResponseStub func(*v1beta1.IBPOrderer, *config.CryptoResponse) error + updateSecretsFromResponseMutex sync.RWMutex + updateSecretsFromResponseArgsForCall []struct { + arg1 *v1beta1.IBPOrderer + arg2 *config.CryptoResponse + } + updateSecretsFromResponseReturns struct { + result1 error + } + updateSecretsFromResponseReturnsOnCall map[int]struct { + result1 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *InitializeIBPOrderer) CheckIfAdminCertsUpdated(arg1 *v1beta1.IBPOrderer) (bool, error) { + fake.checkIfAdminCertsUpdatedMutex.Lock() + ret, specificReturn := fake.checkIfAdminCertsUpdatedReturnsOnCall[len(fake.checkIfAdminCertsUpdatedArgsForCall)] + 
fake.checkIfAdminCertsUpdatedArgsForCall = append(fake.checkIfAdminCertsUpdatedArgsForCall, struct { + arg1 *v1beta1.IBPOrderer + }{arg1}) + stub := fake.CheckIfAdminCertsUpdatedStub + fakeReturns := fake.checkIfAdminCertsUpdatedReturns + fake.recordInvocation("CheckIfAdminCertsUpdated", []interface{}{arg1}) + fake.checkIfAdminCertsUpdatedMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *InitializeIBPOrderer) CheckIfAdminCertsUpdatedCallCount() int { + fake.checkIfAdminCertsUpdatedMutex.RLock() + defer fake.checkIfAdminCertsUpdatedMutex.RUnlock() + return len(fake.checkIfAdminCertsUpdatedArgsForCall) +} + +func (fake *InitializeIBPOrderer) CheckIfAdminCertsUpdatedCalls(stub func(*v1beta1.IBPOrderer) (bool, error)) { + fake.checkIfAdminCertsUpdatedMutex.Lock() + defer fake.checkIfAdminCertsUpdatedMutex.Unlock() + fake.CheckIfAdminCertsUpdatedStub = stub +} + +func (fake *InitializeIBPOrderer) CheckIfAdminCertsUpdatedArgsForCall(i int) *v1beta1.IBPOrderer { + fake.checkIfAdminCertsUpdatedMutex.RLock() + defer fake.checkIfAdminCertsUpdatedMutex.RUnlock() + argsForCall := fake.checkIfAdminCertsUpdatedArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *InitializeIBPOrderer) CheckIfAdminCertsUpdatedReturns(result1 bool, result2 error) { + fake.checkIfAdminCertsUpdatedMutex.Lock() + defer fake.checkIfAdminCertsUpdatedMutex.Unlock() + fake.CheckIfAdminCertsUpdatedStub = nil + fake.checkIfAdminCertsUpdatedReturns = struct { + result1 bool + result2 error + }{result1, result2} +} + +func (fake *InitializeIBPOrderer) CheckIfAdminCertsUpdatedReturnsOnCall(i int, result1 bool, result2 error) { + fake.checkIfAdminCertsUpdatedMutex.Lock() + defer fake.checkIfAdminCertsUpdatedMutex.Unlock() + fake.CheckIfAdminCertsUpdatedStub = nil + if fake.checkIfAdminCertsUpdatedReturnsOnCall == nil { + fake.checkIfAdminCertsUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + result2 error + }) + } + fake.checkIfAdminCertsUpdatedReturnsOnCall[i] = struct { + result1 bool + result2 error + }{result1, result2} +} + +func (fake *InitializeIBPOrderer) Create(arg1 initializer.OrdererConfig, arg2 initializer.IBPOrderer, arg3 string) (*initializer.Response, error) { + fake.createMutex.Lock() + ret, specificReturn := fake.createReturnsOnCall[len(fake.createArgsForCall)] + fake.createArgsForCall = append(fake.createArgsForCall, struct { + arg1 initializer.OrdererConfig + arg2 initializer.IBPOrderer + arg3 string + }{arg1, arg2, arg3}) + stub := fake.CreateStub + fakeReturns := fake.createReturns + fake.recordInvocation("Create", []interface{}{arg1, arg2, arg3}) + fake.createMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *InitializeIBPOrderer) CreateCallCount() int { + fake.createMutex.RLock() + defer fake.createMutex.RUnlock() + return len(fake.createArgsForCall) +} + +func (fake *InitializeIBPOrderer) CreateCalls(stub func(initializer.OrdererConfig, initializer.IBPOrderer, string) (*initializer.Response, error)) { + fake.createMutex.Lock() + defer fake.createMutex.Unlock() + fake.CreateStub = stub +} + +func (fake *InitializeIBPOrderer) CreateArgsForCall(i int) (initializer.OrdererConfig, initializer.IBPOrderer, string) { + fake.createMutex.RLock() + defer fake.createMutex.RUnlock() + argsForCall := fake.createArgsForCall[i] + 
return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *InitializeIBPOrderer) CreateReturns(result1 *initializer.Response, result2 error) { + fake.createMutex.Lock() + defer fake.createMutex.Unlock() + fake.CreateStub = nil + fake.createReturns = struct { + result1 *initializer.Response + result2 error + }{result1, result2} +} + +func (fake *InitializeIBPOrderer) CreateReturnsOnCall(i int, result1 *initializer.Response, result2 error) { + fake.createMutex.Lock() + defer fake.createMutex.Unlock() + fake.CreateStub = nil + if fake.createReturnsOnCall == nil { + fake.createReturnsOnCall = make(map[int]struct { + result1 *initializer.Response + result2 error + }) + } + fake.createReturnsOnCall[i] = struct { + result1 *initializer.Response + result2 error + }{result1, result2} +} + +func (fake *InitializeIBPOrderer) CreateOrUpdateConfigMap(arg1 *v1beta1.IBPOrderer, arg2 initializer.OrdererConfig) error { + fake.createOrUpdateConfigMapMutex.Lock() + ret, specificReturn := fake.createOrUpdateConfigMapReturnsOnCall[len(fake.createOrUpdateConfigMapArgsForCall)] + fake.createOrUpdateConfigMapArgsForCall = append(fake.createOrUpdateConfigMapArgsForCall, struct { + arg1 *v1beta1.IBPOrderer + arg2 initializer.OrdererConfig + }{arg1, arg2}) + stub := fake.CreateOrUpdateConfigMapStub + fakeReturns := fake.createOrUpdateConfigMapReturns + fake.recordInvocation("CreateOrUpdateConfigMap", []interface{}{arg1, arg2}) + fake.createOrUpdateConfigMapMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *InitializeIBPOrderer) CreateOrUpdateConfigMapCallCount() int { + fake.createOrUpdateConfigMapMutex.RLock() + defer fake.createOrUpdateConfigMapMutex.RUnlock() + return len(fake.createOrUpdateConfigMapArgsForCall) +} + +func (fake *InitializeIBPOrderer) CreateOrUpdateConfigMapCalls(stub func(*v1beta1.IBPOrderer, initializer.OrdererConfig) error) { + fake.createOrUpdateConfigMapMutex.Lock() + defer fake.createOrUpdateConfigMapMutex.Unlock() + fake.CreateOrUpdateConfigMapStub = stub +} + +func (fake *InitializeIBPOrderer) CreateOrUpdateConfigMapArgsForCall(i int) (*v1beta1.IBPOrderer, initializer.OrdererConfig) { + fake.createOrUpdateConfigMapMutex.RLock() + defer fake.createOrUpdateConfigMapMutex.RUnlock() + argsForCall := fake.createOrUpdateConfigMapArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *InitializeIBPOrderer) CreateOrUpdateConfigMapReturns(result1 error) { + fake.createOrUpdateConfigMapMutex.Lock() + defer fake.createOrUpdateConfigMapMutex.Unlock() + fake.CreateOrUpdateConfigMapStub = nil + fake.createOrUpdateConfigMapReturns = struct { + result1 error + }{result1} +} + +func (fake *InitializeIBPOrderer) CreateOrUpdateConfigMapReturnsOnCall(i int, result1 error) { + fake.createOrUpdateConfigMapMutex.Lock() + defer fake.createOrUpdateConfigMapMutex.Unlock() + fake.CreateOrUpdateConfigMapStub = nil + if fake.createOrUpdateConfigMapReturnsOnCall == nil { + fake.createOrUpdateConfigMapReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.createOrUpdateConfigMapReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *InitializeIBPOrderer) Delete(arg1 *v1beta1.IBPOrderer) error { + fake.deleteMutex.Lock() + ret, specificReturn := fake.deleteReturnsOnCall[len(fake.deleteArgsForCall)] + fake.deleteArgsForCall = append(fake.deleteArgsForCall, struct { + arg1 *v1beta1.IBPOrderer + }{arg1}) + stub := fake.DeleteStub + fakeReturns 
:= fake.deleteReturns + fake.recordInvocation("Delete", []interface{}{arg1}) + fake.deleteMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *InitializeIBPOrderer) DeleteCallCount() int { + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + return len(fake.deleteArgsForCall) +} + +func (fake *InitializeIBPOrderer) DeleteCalls(stub func(*v1beta1.IBPOrderer) error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = stub +} + +func (fake *InitializeIBPOrderer) DeleteArgsForCall(i int) *v1beta1.IBPOrderer { + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + argsForCall := fake.deleteArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *InitializeIBPOrderer) DeleteReturns(result1 error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = nil + fake.deleteReturns = struct { + result1 error + }{result1} +} + +func (fake *InitializeIBPOrderer) DeleteReturnsOnCall(i int, result1 error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = nil + if fake.deleteReturnsOnCall == nil { + fake.deleteReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.deleteReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *InitializeIBPOrderer) GenerateSecrets(arg1 common.SecretType, arg2 *v1beta1.IBPOrderer, arg3 *config.Response) error { + fake.generateSecretsMutex.Lock() + ret, specificReturn := fake.generateSecretsReturnsOnCall[len(fake.generateSecretsArgsForCall)] + fake.generateSecretsArgsForCall = append(fake.generateSecretsArgsForCall, struct { + arg1 common.SecretType + arg2 *v1beta1.IBPOrderer + arg3 *config.Response + }{arg1, arg2, arg3}) + stub := fake.GenerateSecretsStub + fakeReturns := fake.generateSecretsReturns + fake.recordInvocation("GenerateSecrets", []interface{}{arg1, arg2, arg3}) + fake.generateSecretsMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *InitializeIBPOrderer) GenerateSecretsCallCount() int { + fake.generateSecretsMutex.RLock() + defer fake.generateSecretsMutex.RUnlock() + return len(fake.generateSecretsArgsForCall) +} + +func (fake *InitializeIBPOrderer) GenerateSecretsCalls(stub func(common.SecretType, *v1beta1.IBPOrderer, *config.Response) error) { + fake.generateSecretsMutex.Lock() + defer fake.generateSecretsMutex.Unlock() + fake.GenerateSecretsStub = stub +} + +func (fake *InitializeIBPOrderer) GenerateSecretsArgsForCall(i int) (common.SecretType, *v1beta1.IBPOrderer, *config.Response) { + fake.generateSecretsMutex.RLock() + defer fake.generateSecretsMutex.RUnlock() + argsForCall := fake.generateSecretsArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *InitializeIBPOrderer) GenerateSecretsReturns(result1 error) { + fake.generateSecretsMutex.Lock() + defer fake.generateSecretsMutex.Unlock() + fake.GenerateSecretsStub = nil + fake.generateSecretsReturns = struct { + result1 error + }{result1} +} + +func (fake *InitializeIBPOrderer) GenerateSecretsReturnsOnCall(i int, result1 error) { + fake.generateSecretsMutex.Lock() + defer fake.generateSecretsMutex.Unlock() + fake.GenerateSecretsStub = nil + if fake.generateSecretsReturnsOnCall == nil { + fake.generateSecretsReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.generateSecretsReturnsOnCall[i] = struct { 
+ result1 error + }{result1} +} + +func (fake *InitializeIBPOrderer) GenerateSecretsFromResponse(arg1 *v1beta1.IBPOrderer, arg2 *config.CryptoResponse) error { + fake.generateSecretsFromResponseMutex.Lock() + ret, specificReturn := fake.generateSecretsFromResponseReturnsOnCall[len(fake.generateSecretsFromResponseArgsForCall)] + fake.generateSecretsFromResponseArgsForCall = append(fake.generateSecretsFromResponseArgsForCall, struct { + arg1 *v1beta1.IBPOrderer + arg2 *config.CryptoResponse + }{arg1, arg2}) + stub := fake.GenerateSecretsFromResponseStub + fakeReturns := fake.generateSecretsFromResponseReturns + fake.recordInvocation("GenerateSecretsFromResponse", []interface{}{arg1, arg2}) + fake.generateSecretsFromResponseMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *InitializeIBPOrderer) GenerateSecretsFromResponseCallCount() int { + fake.generateSecretsFromResponseMutex.RLock() + defer fake.generateSecretsFromResponseMutex.RUnlock() + return len(fake.generateSecretsFromResponseArgsForCall) +} + +func (fake *InitializeIBPOrderer) GenerateSecretsFromResponseCalls(stub func(*v1beta1.IBPOrderer, *config.CryptoResponse) error) { + fake.generateSecretsFromResponseMutex.Lock() + defer fake.generateSecretsFromResponseMutex.Unlock() + fake.GenerateSecretsFromResponseStub = stub +} + +func (fake *InitializeIBPOrderer) GenerateSecretsFromResponseArgsForCall(i int) (*v1beta1.IBPOrderer, *config.CryptoResponse) { + fake.generateSecretsFromResponseMutex.RLock() + defer fake.generateSecretsFromResponseMutex.RUnlock() + argsForCall := fake.generateSecretsFromResponseArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *InitializeIBPOrderer) GenerateSecretsFromResponseReturns(result1 error) { + fake.generateSecretsFromResponseMutex.Lock() + defer fake.generateSecretsFromResponseMutex.Unlock() + fake.GenerateSecretsFromResponseStub = nil + fake.generateSecretsFromResponseReturns = struct { + result1 error + }{result1} +} + +func (fake *InitializeIBPOrderer) GenerateSecretsFromResponseReturnsOnCall(i int, result1 error) { + fake.generateSecretsFromResponseMutex.Lock() + defer fake.generateSecretsFromResponseMutex.Unlock() + fake.GenerateSecretsFromResponseStub = nil + if fake.generateSecretsFromResponseReturnsOnCall == nil { + fake.generateSecretsFromResponseReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.generateSecretsFromResponseReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *InitializeIBPOrderer) GetConfigFromConfigMap(arg1 *v1beta1.IBPOrderer) (*v1.ConfigMap, error) { + fake.getConfigFromConfigMapMutex.Lock() + ret, specificReturn := fake.getConfigFromConfigMapReturnsOnCall[len(fake.getConfigFromConfigMapArgsForCall)] + fake.getConfigFromConfigMapArgsForCall = append(fake.getConfigFromConfigMapArgsForCall, struct { + arg1 *v1beta1.IBPOrderer + }{arg1}) + stub := fake.GetConfigFromConfigMapStub + fakeReturns := fake.getConfigFromConfigMapReturns + fake.recordInvocation("GetConfigFromConfigMap", []interface{}{arg1}) + fake.getConfigFromConfigMapMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *InitializeIBPOrderer) GetConfigFromConfigMapCallCount() int { + fake.getConfigFromConfigMapMutex.RLock() + defer fake.getConfigFromConfigMapMutex.RUnlock() + return 
len(fake.getConfigFromConfigMapArgsForCall) +} + +func (fake *InitializeIBPOrderer) GetConfigFromConfigMapCalls(stub func(*v1beta1.IBPOrderer) (*v1.ConfigMap, error)) { + fake.getConfigFromConfigMapMutex.Lock() + defer fake.getConfigFromConfigMapMutex.Unlock() + fake.GetConfigFromConfigMapStub = stub +} + +func (fake *InitializeIBPOrderer) GetConfigFromConfigMapArgsForCall(i int) *v1beta1.IBPOrderer { + fake.getConfigFromConfigMapMutex.RLock() + defer fake.getConfigFromConfigMapMutex.RUnlock() + argsForCall := fake.getConfigFromConfigMapArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *InitializeIBPOrderer) GetConfigFromConfigMapReturns(result1 *v1.ConfigMap, result2 error) { + fake.getConfigFromConfigMapMutex.Lock() + defer fake.getConfigFromConfigMapMutex.Unlock() + fake.GetConfigFromConfigMapStub = nil + fake.getConfigFromConfigMapReturns = struct { + result1 *v1.ConfigMap + result2 error + }{result1, result2} +} + +func (fake *InitializeIBPOrderer) GetConfigFromConfigMapReturnsOnCall(i int, result1 *v1.ConfigMap, result2 error) { + fake.getConfigFromConfigMapMutex.Lock() + defer fake.getConfigFromConfigMapMutex.Unlock() + fake.GetConfigFromConfigMapStub = nil + if fake.getConfigFromConfigMapReturnsOnCall == nil { + fake.getConfigFromConfigMapReturnsOnCall = make(map[int]struct { + result1 *v1.ConfigMap + result2 error + }) + } + fake.getConfigFromConfigMapReturnsOnCall[i] = struct { + result1 *v1.ConfigMap + result2 error + }{result1, result2} +} + +func (fake *InitializeIBPOrderer) GetCoreConfigFromBytes(arg1 *v1beta1.IBPOrderer, arg2 []byte) (initializer.OrdererConfig, error) { + var arg2Copy []byte + if arg2 != nil { + arg2Copy = make([]byte, len(arg2)) + copy(arg2Copy, arg2) + } + fake.getCoreConfigFromBytesMutex.Lock() + ret, specificReturn := fake.getCoreConfigFromBytesReturnsOnCall[len(fake.getCoreConfigFromBytesArgsForCall)] + fake.getCoreConfigFromBytesArgsForCall = append(fake.getCoreConfigFromBytesArgsForCall, struct { + arg1 *v1beta1.IBPOrderer + arg2 []byte + }{arg1, arg2Copy}) + stub := fake.GetCoreConfigFromBytesStub + fakeReturns := fake.getCoreConfigFromBytesReturns + fake.recordInvocation("GetCoreConfigFromBytes", []interface{}{arg1, arg2Copy}) + fake.getCoreConfigFromBytesMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *InitializeIBPOrderer) GetCoreConfigFromBytesCallCount() int { + fake.getCoreConfigFromBytesMutex.RLock() + defer fake.getCoreConfigFromBytesMutex.RUnlock() + return len(fake.getCoreConfigFromBytesArgsForCall) +} + +func (fake *InitializeIBPOrderer) GetCoreConfigFromBytesCalls(stub func(*v1beta1.IBPOrderer, []byte) (initializer.OrdererConfig, error)) { + fake.getCoreConfigFromBytesMutex.Lock() + defer fake.getCoreConfigFromBytesMutex.Unlock() + fake.GetCoreConfigFromBytesStub = stub +} + +func (fake *InitializeIBPOrderer) GetCoreConfigFromBytesArgsForCall(i int) (*v1beta1.IBPOrderer, []byte) { + fake.getCoreConfigFromBytesMutex.RLock() + defer fake.getCoreConfigFromBytesMutex.RUnlock() + argsForCall := fake.getCoreConfigFromBytesArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *InitializeIBPOrderer) GetCoreConfigFromBytesReturns(result1 initializer.OrdererConfig, result2 error) { + fake.getCoreConfigFromBytesMutex.Lock() + defer fake.getCoreConfigFromBytesMutex.Unlock() + fake.GetCoreConfigFromBytesStub = nil + fake.getCoreConfigFromBytesReturns = struct { + result1 
initializer.OrdererConfig + result2 error + }{result1, result2} +} + +func (fake *InitializeIBPOrderer) GetCoreConfigFromBytesReturnsOnCall(i int, result1 initializer.OrdererConfig, result2 error) { + fake.getCoreConfigFromBytesMutex.Lock() + defer fake.getCoreConfigFromBytesMutex.Unlock() + fake.GetCoreConfigFromBytesStub = nil + if fake.getCoreConfigFromBytesReturnsOnCall == nil { + fake.getCoreConfigFromBytesReturnsOnCall = make(map[int]struct { + result1 initializer.OrdererConfig + result2 error + }) + } + fake.getCoreConfigFromBytesReturnsOnCall[i] = struct { + result1 initializer.OrdererConfig + result2 error + }{result1, result2} +} + +func (fake *InitializeIBPOrderer) GetCoreConfigFromFile(arg1 *v1beta1.IBPOrderer, arg2 string) (initializer.OrdererConfig, error) { + fake.getCoreConfigFromFileMutex.Lock() + ret, specificReturn := fake.getCoreConfigFromFileReturnsOnCall[len(fake.getCoreConfigFromFileArgsForCall)] + fake.getCoreConfigFromFileArgsForCall = append(fake.getCoreConfigFromFileArgsForCall, struct { + arg1 *v1beta1.IBPOrderer + arg2 string + }{arg1, arg2}) + stub := fake.GetCoreConfigFromFileStub + fakeReturns := fake.getCoreConfigFromFileReturns + fake.recordInvocation("GetCoreConfigFromFile", []interface{}{arg1, arg2}) + fake.getCoreConfigFromFileMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *InitializeIBPOrderer) GetCoreConfigFromFileCallCount() int { + fake.getCoreConfigFromFileMutex.RLock() + defer fake.getCoreConfigFromFileMutex.RUnlock() + return len(fake.getCoreConfigFromFileArgsForCall) +} + +func (fake *InitializeIBPOrderer) GetCoreConfigFromFileCalls(stub func(*v1beta1.IBPOrderer, string) (initializer.OrdererConfig, error)) { + fake.getCoreConfigFromFileMutex.Lock() + defer fake.getCoreConfigFromFileMutex.Unlock() + fake.GetCoreConfigFromFileStub = stub +} + +func (fake *InitializeIBPOrderer) GetCoreConfigFromFileArgsForCall(i int) (*v1beta1.IBPOrderer, string) { + fake.getCoreConfigFromFileMutex.RLock() + defer fake.getCoreConfigFromFileMutex.RUnlock() + argsForCall := fake.getCoreConfigFromFileArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *InitializeIBPOrderer) GetCoreConfigFromFileReturns(result1 initializer.OrdererConfig, result2 error) { + fake.getCoreConfigFromFileMutex.Lock() + defer fake.getCoreConfigFromFileMutex.Unlock() + fake.GetCoreConfigFromFileStub = nil + fake.getCoreConfigFromFileReturns = struct { + result1 initializer.OrdererConfig + result2 error + }{result1, result2} +} + +func (fake *InitializeIBPOrderer) GetCoreConfigFromFileReturnsOnCall(i int, result1 initializer.OrdererConfig, result2 error) { + fake.getCoreConfigFromFileMutex.Lock() + defer fake.getCoreConfigFromFileMutex.Unlock() + fake.GetCoreConfigFromFileStub = nil + if fake.getCoreConfigFromFileReturnsOnCall == nil { + fake.getCoreConfigFromFileReturnsOnCall = make(map[int]struct { + result1 initializer.OrdererConfig + result2 error + }) + } + fake.getCoreConfigFromFileReturnsOnCall[i] = struct { + result1 initializer.OrdererConfig + result2 error + }{result1, result2} +} + +func (fake *InitializeIBPOrderer) GetCrypto(arg1 *v1beta1.IBPOrderer) (*config.CryptoResponse, error) { + fake.getCryptoMutex.Lock() + ret, specificReturn := fake.getCryptoReturnsOnCall[len(fake.getCryptoArgsForCall)] + fake.getCryptoArgsForCall = append(fake.getCryptoArgsForCall, struct { + arg1 *v1beta1.IBPOrderer + }{arg1}) + stub := 
fake.GetCryptoStub + fakeReturns := fake.getCryptoReturns + fake.recordInvocation("GetCrypto", []interface{}{arg1}) + fake.getCryptoMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *InitializeIBPOrderer) GetCryptoCallCount() int { + fake.getCryptoMutex.RLock() + defer fake.getCryptoMutex.RUnlock() + return len(fake.getCryptoArgsForCall) +} + +func (fake *InitializeIBPOrderer) GetCryptoCalls(stub func(*v1beta1.IBPOrderer) (*config.CryptoResponse, error)) { + fake.getCryptoMutex.Lock() + defer fake.getCryptoMutex.Unlock() + fake.GetCryptoStub = stub +} + +func (fake *InitializeIBPOrderer) GetCryptoArgsForCall(i int) *v1beta1.IBPOrderer { + fake.getCryptoMutex.RLock() + defer fake.getCryptoMutex.RUnlock() + argsForCall := fake.getCryptoArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *InitializeIBPOrderer) GetCryptoReturns(result1 *config.CryptoResponse, result2 error) { + fake.getCryptoMutex.Lock() + defer fake.getCryptoMutex.Unlock() + fake.GetCryptoStub = nil + fake.getCryptoReturns = struct { + result1 *config.CryptoResponse + result2 error + }{result1, result2} +} + +func (fake *InitializeIBPOrderer) GetCryptoReturnsOnCall(i int, result1 *config.CryptoResponse, result2 error) { + fake.getCryptoMutex.Lock() + defer fake.getCryptoMutex.Unlock() + fake.GetCryptoStub = nil + if fake.getCryptoReturnsOnCall == nil { + fake.getCryptoReturnsOnCall = make(map[int]struct { + result1 *config.CryptoResponse + result2 error + }) + } + fake.getCryptoReturnsOnCall[i] = struct { + result1 *config.CryptoResponse + result2 error + }{result1, result2} +} + +func (fake *InitializeIBPOrderer) GetInitOrderer(arg1 *v1beta1.IBPOrderer, arg2 string) (*initializer.Orderer, error) { + fake.getInitOrdererMutex.Lock() + ret, specificReturn := fake.getInitOrdererReturnsOnCall[len(fake.getInitOrdererArgsForCall)] + fake.getInitOrdererArgsForCall = append(fake.getInitOrdererArgsForCall, struct { + arg1 *v1beta1.IBPOrderer + arg2 string + }{arg1, arg2}) + stub := fake.GetInitOrdererStub + fakeReturns := fake.getInitOrdererReturns + fake.recordInvocation("GetInitOrderer", []interface{}{arg1, arg2}) + fake.getInitOrdererMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *InitializeIBPOrderer) GetInitOrdererCallCount() int { + fake.getInitOrdererMutex.RLock() + defer fake.getInitOrdererMutex.RUnlock() + return len(fake.getInitOrdererArgsForCall) +} + +func (fake *InitializeIBPOrderer) GetInitOrdererCalls(stub func(*v1beta1.IBPOrderer, string) (*initializer.Orderer, error)) { + fake.getInitOrdererMutex.Lock() + defer fake.getInitOrdererMutex.Unlock() + fake.GetInitOrdererStub = stub +} + +func (fake *InitializeIBPOrderer) GetInitOrdererArgsForCall(i int) (*v1beta1.IBPOrderer, string) { + fake.getInitOrdererMutex.RLock() + defer fake.getInitOrdererMutex.RUnlock() + argsForCall := fake.getInitOrdererArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *InitializeIBPOrderer) GetInitOrdererReturns(result1 *initializer.Orderer, result2 error) { + fake.getInitOrdererMutex.Lock() + defer fake.getInitOrdererMutex.Unlock() + fake.GetInitOrdererStub = nil + fake.getInitOrdererReturns = struct { + result1 *initializer.Orderer + result2 error + }{result1, result2} +} + +func (fake *InitializeIBPOrderer) GetInitOrdererReturnsOnCall(i 
int, result1 *initializer.Orderer, result2 error) { + fake.getInitOrdererMutex.Lock() + defer fake.getInitOrdererMutex.Unlock() + fake.GetInitOrdererStub = nil + if fake.getInitOrdererReturnsOnCall == nil { + fake.getInitOrdererReturnsOnCall = make(map[int]struct { + result1 *initializer.Orderer + result2 error + }) + } + fake.getInitOrdererReturnsOnCall[i] = struct { + result1 *initializer.Orderer + result2 error + }{result1, result2} +} + +func (fake *InitializeIBPOrderer) GetUpdatedOrderer(arg1 *v1beta1.IBPOrderer) (*initializer.Orderer, error) { + fake.getUpdatedOrdererMutex.Lock() + ret, specificReturn := fake.getUpdatedOrdererReturnsOnCall[len(fake.getUpdatedOrdererArgsForCall)] + fake.getUpdatedOrdererArgsForCall = append(fake.getUpdatedOrdererArgsForCall, struct { + arg1 *v1beta1.IBPOrderer + }{arg1}) + stub := fake.GetUpdatedOrdererStub + fakeReturns := fake.getUpdatedOrdererReturns + fake.recordInvocation("GetUpdatedOrderer", []interface{}{arg1}) + fake.getUpdatedOrdererMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *InitializeIBPOrderer) GetUpdatedOrdererCallCount() int { + fake.getUpdatedOrdererMutex.RLock() + defer fake.getUpdatedOrdererMutex.RUnlock() + return len(fake.getUpdatedOrdererArgsForCall) +} + +func (fake *InitializeIBPOrderer) GetUpdatedOrdererCalls(stub func(*v1beta1.IBPOrderer) (*initializer.Orderer, error)) { + fake.getUpdatedOrdererMutex.Lock() + defer fake.getUpdatedOrdererMutex.Unlock() + fake.GetUpdatedOrdererStub = stub +} + +func (fake *InitializeIBPOrderer) GetUpdatedOrdererArgsForCall(i int) *v1beta1.IBPOrderer { + fake.getUpdatedOrdererMutex.RLock() + defer fake.getUpdatedOrdererMutex.RUnlock() + argsForCall := fake.getUpdatedOrdererArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *InitializeIBPOrderer) GetUpdatedOrdererReturns(result1 *initializer.Orderer, result2 error) { + fake.getUpdatedOrdererMutex.Lock() + defer fake.getUpdatedOrdererMutex.Unlock() + fake.GetUpdatedOrdererStub = nil + fake.getUpdatedOrdererReturns = struct { + result1 *initializer.Orderer + result2 error + }{result1, result2} +} + +func (fake *InitializeIBPOrderer) GetUpdatedOrdererReturnsOnCall(i int, result1 *initializer.Orderer, result2 error) { + fake.getUpdatedOrdererMutex.Lock() + defer fake.getUpdatedOrdererMutex.Unlock() + fake.GetUpdatedOrdererStub = nil + if fake.getUpdatedOrdererReturnsOnCall == nil { + fake.getUpdatedOrdererReturnsOnCall = make(map[int]struct { + result1 *initializer.Orderer + result2 error + }) + } + fake.getUpdatedOrdererReturnsOnCall[i] = struct { + result1 *initializer.Orderer + result2 error + }{result1, result2} +} + +func (fake *InitializeIBPOrderer) MissingCrypto(arg1 *v1beta1.IBPOrderer) bool { + fake.missingCryptoMutex.Lock() + ret, specificReturn := fake.missingCryptoReturnsOnCall[len(fake.missingCryptoArgsForCall)] + fake.missingCryptoArgsForCall = append(fake.missingCryptoArgsForCall, struct { + arg1 *v1beta1.IBPOrderer + }{arg1}) + stub := fake.MissingCryptoStub + fakeReturns := fake.missingCryptoReturns + fake.recordInvocation("MissingCrypto", []interface{}{arg1}) + fake.missingCryptoMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *InitializeIBPOrderer) MissingCryptoCallCount() int { + fake.missingCryptoMutex.RLock() + defer fake.missingCryptoMutex.RUnlock() + return 
len(fake.missingCryptoArgsForCall) +} + +func (fake *InitializeIBPOrderer) MissingCryptoCalls(stub func(*v1beta1.IBPOrderer) bool) { + fake.missingCryptoMutex.Lock() + defer fake.missingCryptoMutex.Unlock() + fake.MissingCryptoStub = stub +} + +func (fake *InitializeIBPOrderer) MissingCryptoArgsForCall(i int) *v1beta1.IBPOrderer { + fake.missingCryptoMutex.RLock() + defer fake.missingCryptoMutex.RUnlock() + argsForCall := fake.missingCryptoArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *InitializeIBPOrderer) MissingCryptoReturns(result1 bool) { + fake.missingCryptoMutex.Lock() + defer fake.missingCryptoMutex.Unlock() + fake.MissingCryptoStub = nil + fake.missingCryptoReturns = struct { + result1 bool + }{result1} +} + +func (fake *InitializeIBPOrderer) MissingCryptoReturnsOnCall(i int, result1 bool) { + fake.missingCryptoMutex.Lock() + defer fake.missingCryptoMutex.Unlock() + fake.MissingCryptoStub = nil + if fake.missingCryptoReturnsOnCall == nil { + fake.missingCryptoReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.missingCryptoReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *InitializeIBPOrderer) Update(arg1 initializer.OrdererConfig, arg2 initializer.IBPOrderer) (*initializer.Response, error) { + fake.updateMutex.Lock() + ret, specificReturn := fake.updateReturnsOnCall[len(fake.updateArgsForCall)] + fake.updateArgsForCall = append(fake.updateArgsForCall, struct { + arg1 initializer.OrdererConfig + arg2 initializer.IBPOrderer + }{arg1, arg2}) + stub := fake.UpdateStub + fakeReturns := fake.updateReturns + fake.recordInvocation("Update", []interface{}{arg1, arg2}) + fake.updateMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *InitializeIBPOrderer) UpdateCallCount() int { + fake.updateMutex.RLock() + defer fake.updateMutex.RUnlock() + return len(fake.updateArgsForCall) +} + +func (fake *InitializeIBPOrderer) UpdateCalls(stub func(initializer.OrdererConfig, initializer.IBPOrderer) (*initializer.Response, error)) { + fake.updateMutex.Lock() + defer fake.updateMutex.Unlock() + fake.UpdateStub = stub +} + +func (fake *InitializeIBPOrderer) UpdateArgsForCall(i int) (initializer.OrdererConfig, initializer.IBPOrderer) { + fake.updateMutex.RLock() + defer fake.updateMutex.RUnlock() + argsForCall := fake.updateArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *InitializeIBPOrderer) UpdateReturns(result1 *initializer.Response, result2 error) { + fake.updateMutex.Lock() + defer fake.updateMutex.Unlock() + fake.UpdateStub = nil + fake.updateReturns = struct { + result1 *initializer.Response + result2 error + }{result1, result2} +} + +func (fake *InitializeIBPOrderer) UpdateReturnsOnCall(i int, result1 *initializer.Response, result2 error) { + fake.updateMutex.Lock() + defer fake.updateMutex.Unlock() + fake.UpdateStub = nil + if fake.updateReturnsOnCall == nil { + fake.updateReturnsOnCall = make(map[int]struct { + result1 *initializer.Response + result2 error + }) + } + fake.updateReturnsOnCall[i] = struct { + result1 *initializer.Response + result2 error + }{result1, result2} +} + +func (fake *InitializeIBPOrderer) UpdateAdminSecret(arg1 *v1beta1.IBPOrderer) error { + fake.updateAdminSecretMutex.Lock() + ret, specificReturn := fake.updateAdminSecretReturnsOnCall[len(fake.updateAdminSecretArgsForCall)] + fake.updateAdminSecretArgsForCall = append(fake.updateAdminSecretArgsForCall, 
struct { + arg1 *v1beta1.IBPOrderer + }{arg1}) + stub := fake.UpdateAdminSecretStub + fakeReturns := fake.updateAdminSecretReturns + fake.recordInvocation("UpdateAdminSecret", []interface{}{arg1}) + fake.updateAdminSecretMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *InitializeIBPOrderer) UpdateAdminSecretCallCount() int { + fake.updateAdminSecretMutex.RLock() + defer fake.updateAdminSecretMutex.RUnlock() + return len(fake.updateAdminSecretArgsForCall) +} + +func (fake *InitializeIBPOrderer) UpdateAdminSecretCalls(stub func(*v1beta1.IBPOrderer) error) { + fake.updateAdminSecretMutex.Lock() + defer fake.updateAdminSecretMutex.Unlock() + fake.UpdateAdminSecretStub = stub +} + +func (fake *InitializeIBPOrderer) UpdateAdminSecretArgsForCall(i int) *v1beta1.IBPOrderer { + fake.updateAdminSecretMutex.RLock() + defer fake.updateAdminSecretMutex.RUnlock() + argsForCall := fake.updateAdminSecretArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *InitializeIBPOrderer) UpdateAdminSecretReturns(result1 error) { + fake.updateAdminSecretMutex.Lock() + defer fake.updateAdminSecretMutex.Unlock() + fake.UpdateAdminSecretStub = nil + fake.updateAdminSecretReturns = struct { + result1 error + }{result1} +} + +func (fake *InitializeIBPOrderer) UpdateAdminSecretReturnsOnCall(i int, result1 error) { + fake.updateAdminSecretMutex.Lock() + defer fake.updateAdminSecretMutex.Unlock() + fake.UpdateAdminSecretStub = nil + if fake.updateAdminSecretReturnsOnCall == nil { + fake.updateAdminSecretReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.updateAdminSecretReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *InitializeIBPOrderer) UpdateSecrets(arg1 common.SecretType, arg2 *v1beta1.IBPOrderer, arg3 *config.Response) error { + fake.updateSecretsMutex.Lock() + ret, specificReturn := fake.updateSecretsReturnsOnCall[len(fake.updateSecretsArgsForCall)] + fake.updateSecretsArgsForCall = append(fake.updateSecretsArgsForCall, struct { + arg1 common.SecretType + arg2 *v1beta1.IBPOrderer + arg3 *config.Response + }{arg1, arg2, arg3}) + stub := fake.UpdateSecretsStub + fakeReturns := fake.updateSecretsReturns + fake.recordInvocation("UpdateSecrets", []interface{}{arg1, arg2, arg3}) + fake.updateSecretsMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *InitializeIBPOrderer) UpdateSecretsCallCount() int { + fake.updateSecretsMutex.RLock() + defer fake.updateSecretsMutex.RUnlock() + return len(fake.updateSecretsArgsForCall) +} + +func (fake *InitializeIBPOrderer) UpdateSecretsCalls(stub func(common.SecretType, *v1beta1.IBPOrderer, *config.Response) error) { + fake.updateSecretsMutex.Lock() + defer fake.updateSecretsMutex.Unlock() + fake.UpdateSecretsStub = stub +} + +func (fake *InitializeIBPOrderer) UpdateSecretsArgsForCall(i int) (common.SecretType, *v1beta1.IBPOrderer, *config.Response) { + fake.updateSecretsMutex.RLock() + defer fake.updateSecretsMutex.RUnlock() + argsForCall := fake.updateSecretsArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *InitializeIBPOrderer) UpdateSecretsReturns(result1 error) { + fake.updateSecretsMutex.Lock() + defer fake.updateSecretsMutex.Unlock() + fake.UpdateSecretsStub = nil + fake.updateSecretsReturns = struct { + result1 error + }{result1} +} + +func (fake *InitializeIBPOrderer) 
UpdateSecretsReturnsOnCall(i int, result1 error) { + fake.updateSecretsMutex.Lock() + defer fake.updateSecretsMutex.Unlock() + fake.UpdateSecretsStub = nil + if fake.updateSecretsReturnsOnCall == nil { + fake.updateSecretsReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.updateSecretsReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *InitializeIBPOrderer) UpdateSecretsFromResponse(arg1 *v1beta1.IBPOrderer, arg2 *config.CryptoResponse) error { + fake.updateSecretsFromResponseMutex.Lock() + ret, specificReturn := fake.updateSecretsFromResponseReturnsOnCall[len(fake.updateSecretsFromResponseArgsForCall)] + fake.updateSecretsFromResponseArgsForCall = append(fake.updateSecretsFromResponseArgsForCall, struct { + arg1 *v1beta1.IBPOrderer + arg2 *config.CryptoResponse + }{arg1, arg2}) + stub := fake.UpdateSecretsFromResponseStub + fakeReturns := fake.updateSecretsFromResponseReturns + fake.recordInvocation("UpdateSecretsFromResponse", []interface{}{arg1, arg2}) + fake.updateSecretsFromResponseMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *InitializeIBPOrderer) UpdateSecretsFromResponseCallCount() int { + fake.updateSecretsFromResponseMutex.RLock() + defer fake.updateSecretsFromResponseMutex.RUnlock() + return len(fake.updateSecretsFromResponseArgsForCall) +} + +func (fake *InitializeIBPOrderer) UpdateSecretsFromResponseCalls(stub func(*v1beta1.IBPOrderer, *config.CryptoResponse) error) { + fake.updateSecretsFromResponseMutex.Lock() + defer fake.updateSecretsFromResponseMutex.Unlock() + fake.UpdateSecretsFromResponseStub = stub +} + +func (fake *InitializeIBPOrderer) UpdateSecretsFromResponseArgsForCall(i int) (*v1beta1.IBPOrderer, *config.CryptoResponse) { + fake.updateSecretsFromResponseMutex.RLock() + defer fake.updateSecretsFromResponseMutex.RUnlock() + argsForCall := fake.updateSecretsFromResponseArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *InitializeIBPOrderer) UpdateSecretsFromResponseReturns(result1 error) { + fake.updateSecretsFromResponseMutex.Lock() + defer fake.updateSecretsFromResponseMutex.Unlock() + fake.UpdateSecretsFromResponseStub = nil + fake.updateSecretsFromResponseReturns = struct { + result1 error + }{result1} +} + +func (fake *InitializeIBPOrderer) UpdateSecretsFromResponseReturnsOnCall(i int, result1 error) { + fake.updateSecretsFromResponseMutex.Lock() + defer fake.updateSecretsFromResponseMutex.Unlock() + fake.UpdateSecretsFromResponseStub = nil + if fake.updateSecretsFromResponseReturnsOnCall == nil { + fake.updateSecretsFromResponseReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.updateSecretsFromResponseReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *InitializeIBPOrderer) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.checkIfAdminCertsUpdatedMutex.RLock() + defer fake.checkIfAdminCertsUpdatedMutex.RUnlock() + fake.createMutex.RLock() + defer fake.createMutex.RUnlock() + fake.createOrUpdateConfigMapMutex.RLock() + defer fake.createOrUpdateConfigMapMutex.RUnlock() + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + fake.generateSecretsMutex.RLock() + defer fake.generateSecretsMutex.RUnlock() + fake.generateSecretsFromResponseMutex.RLock() + defer fake.generateSecretsFromResponseMutex.RUnlock() + fake.getConfigFromConfigMapMutex.RLock() + defer 
fake.getConfigFromConfigMapMutex.RUnlock() + fake.getCoreConfigFromBytesMutex.RLock() + defer fake.getCoreConfigFromBytesMutex.RUnlock() + fake.getCoreConfigFromFileMutex.RLock() + defer fake.getCoreConfigFromFileMutex.RUnlock() + fake.getCryptoMutex.RLock() + defer fake.getCryptoMutex.RUnlock() + fake.getInitOrdererMutex.RLock() + defer fake.getInitOrdererMutex.RUnlock() + fake.getUpdatedOrdererMutex.RLock() + defer fake.getUpdatedOrdererMutex.RUnlock() + fake.missingCryptoMutex.RLock() + defer fake.missingCryptoMutex.RUnlock() + fake.updateMutex.RLock() + defer fake.updateMutex.RUnlock() + fake.updateAdminSecretMutex.RLock() + defer fake.updateAdminSecretMutex.RUnlock() + fake.updateSecretsMutex.RLock() + defer fake.updateSecretsMutex.RUnlock() + fake.updateSecretsFromResponseMutex.RLock() + defer fake.updateSecretsFromResponseMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *InitializeIBPOrderer) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ baseorderer.InitializeIBPOrderer = new(InitializeIBPOrderer) diff --git a/pkg/offering/base/orderer/mocks/node_manager.go b/pkg/offering/base/orderer/mocks/node_manager.go new file mode 100644 index 00000000..19f50a0c --- /dev/null +++ b/pkg/offering/base/orderer/mocks/node_manager.go @@ -0,0 +1,116 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package mocks + +import ( + "sync" + "time" + + baseorderer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer" +) + +type NodeManager struct { + GetNodeStub func(int, map[string]*time.Timer, baseorderer.RestartManager) *baseorderer.Node + getNodeMutex sync.RWMutex + getNodeArgsForCall []struct { + arg1 int + arg2 map[string]*time.Timer + arg3 baseorderer.RestartManager + } + getNodeReturns struct { + result1 *baseorderer.Node + } + getNodeReturnsOnCall map[int]struct { + result1 *baseorderer.Node + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *NodeManager) GetNode(arg1 int, arg2 map[string]*time.Timer, arg3 baseorderer.RestartManager) *baseorderer.Node { + fake.getNodeMutex.Lock() + ret, specificReturn := fake.getNodeReturnsOnCall[len(fake.getNodeArgsForCall)] + fake.getNodeArgsForCall = append(fake.getNodeArgsForCall, struct { + arg1 int + arg2 map[string]*time.Timer + arg3 baseorderer.RestartManager + }{arg1, arg2, arg3}) + stub := fake.GetNodeStub + fakeReturns := fake.getNodeReturns + fake.recordInvocation("GetNode", []interface{}{arg1, arg2, arg3}) + fake.getNodeMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *NodeManager) GetNodeCallCount() int { + fake.getNodeMutex.RLock() + defer fake.getNodeMutex.RUnlock() + return len(fake.getNodeArgsForCall) +} + +func (fake *NodeManager) GetNodeCalls(stub func(int, map[string]*time.Timer, baseorderer.RestartManager) *baseorderer.Node) { + fake.getNodeMutex.Lock() + defer fake.getNodeMutex.Unlock() + fake.GetNodeStub = stub +} + +func (fake *NodeManager) GetNodeArgsForCall(i int) (int, map[string]*time.Timer, 
baseorderer.RestartManager) { + fake.getNodeMutex.RLock() + defer fake.getNodeMutex.RUnlock() + argsForCall := fake.getNodeArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *NodeManager) GetNodeReturns(result1 *baseorderer.Node) { + fake.getNodeMutex.Lock() + defer fake.getNodeMutex.Unlock() + fake.GetNodeStub = nil + fake.getNodeReturns = struct { + result1 *baseorderer.Node + }{result1} +} + +func (fake *NodeManager) GetNodeReturnsOnCall(i int, result1 *baseorderer.Node) { + fake.getNodeMutex.Lock() + defer fake.getNodeMutex.Unlock() + fake.GetNodeStub = nil + if fake.getNodeReturnsOnCall == nil { + fake.getNodeReturnsOnCall = make(map[int]struct { + result1 *baseorderer.Node + }) + } + fake.getNodeReturnsOnCall[i] = struct { + result1 *baseorderer.Node + }{result1} +} + +func (fake *NodeManager) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.getNodeMutex.RLock() + defer fake.getNodeMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *NodeManager) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ baseorderer.NodeManager = new(NodeManager) diff --git a/pkg/offering/base/orderer/mocks/restart_manager.go b/pkg/offering/base/orderer/mocks/restart_manager.go new file mode 100644 index 00000000..1262ac96 --- /dev/null +++ b/pkg/offering/base/orderer/mocks/restart_manager.go @@ -0,0 +1,486 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package mocks + +import ( + "sync" + + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common" + baseorderer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer" + "github.com/IBM-Blockchain/fabric-operator/pkg/restart" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type RestartManager struct { + ForAdminCertUpdateStub func(v1.Object) error + forAdminCertUpdateMutex sync.RWMutex + forAdminCertUpdateArgsForCall []struct { + arg1 v1.Object + } + forAdminCertUpdateReturns struct { + result1 error + } + forAdminCertUpdateReturnsOnCall map[int]struct { + result1 error + } + ForCertUpdateStub func(common.SecretType, v1.Object) error + forCertUpdateMutex sync.RWMutex + forCertUpdateArgsForCall []struct { + arg1 common.SecretType + arg2 v1.Object + } + forCertUpdateReturns struct { + result1 error + } + forCertUpdateReturnsOnCall map[int]struct { + result1 error + } + ForConfigOverrideStub func(v1.Object) error + forConfigOverrideMutex sync.RWMutex + forConfigOverrideArgsForCall []struct { + arg1 v1.Object + } + forConfigOverrideReturns struct { + result1 error + } + forConfigOverrideReturnsOnCall map[int]struct { + result1 error + } + ForNodeOUStub func(v1.Object) error + forNodeOUMutex sync.RWMutex + forNodeOUArgsForCall []struct { + arg1 v1.Object + } + forNodeOUReturns struct { + result1 error + } + forNodeOUReturnsOnCall map[int]struct { + result1 error + } + ForRestartActionStub func(v1.Object) error + forRestartActionMutex sync.RWMutex + forRestartActionArgsForCall []struct { + arg1 v1.Object + } + forRestartActionReturns struct { + result1 error + } + forRestartActionReturnsOnCall map[int]struct { + result1 error + } + TriggerIfNeededStub func(restart.Instance) error + triggerIfNeededMutex sync.RWMutex + triggerIfNeededArgsForCall []struct { + arg1 restart.Instance + } + triggerIfNeededReturns struct { + result1 error + } + triggerIfNeededReturnsOnCall map[int]struct { + result1 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *RestartManager) ForAdminCertUpdate(arg1 v1.Object) error { + fake.forAdminCertUpdateMutex.Lock() + ret, specificReturn := fake.forAdminCertUpdateReturnsOnCall[len(fake.forAdminCertUpdateArgsForCall)] + fake.forAdminCertUpdateArgsForCall = append(fake.forAdminCertUpdateArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.ForAdminCertUpdateStub + fakeReturns := fake.forAdminCertUpdateReturns + fake.recordInvocation("ForAdminCertUpdate", []interface{}{arg1}) + fake.forAdminCertUpdateMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *RestartManager) ForAdminCertUpdateCallCount() int { + fake.forAdminCertUpdateMutex.RLock() + defer fake.forAdminCertUpdateMutex.RUnlock() + return len(fake.forAdminCertUpdateArgsForCall) +} + +func (fake *RestartManager) ForAdminCertUpdateCalls(stub func(v1.Object) error) { + fake.forAdminCertUpdateMutex.Lock() + defer fake.forAdminCertUpdateMutex.Unlock() + fake.ForAdminCertUpdateStub = stub +} + +func (fake *RestartManager) ForAdminCertUpdateArgsForCall(i int) v1.Object { + fake.forAdminCertUpdateMutex.RLock() + defer fake.forAdminCertUpdateMutex.RUnlock() + argsForCall := fake.forAdminCertUpdateArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *RestartManager) ForAdminCertUpdateReturns(result1 error) { + fake.forAdminCertUpdateMutex.Lock() + defer fake.forAdminCertUpdateMutex.Unlock() + fake.ForAdminCertUpdateStub = nil + 
fake.forAdminCertUpdateReturns = struct { + result1 error + }{result1} +} + +func (fake *RestartManager) ForAdminCertUpdateReturnsOnCall(i int, result1 error) { + fake.forAdminCertUpdateMutex.Lock() + defer fake.forAdminCertUpdateMutex.Unlock() + fake.ForAdminCertUpdateStub = nil + if fake.forAdminCertUpdateReturnsOnCall == nil { + fake.forAdminCertUpdateReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.forAdminCertUpdateReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *RestartManager) ForCertUpdate(arg1 common.SecretType, arg2 v1.Object) error { + fake.forCertUpdateMutex.Lock() + ret, specificReturn := fake.forCertUpdateReturnsOnCall[len(fake.forCertUpdateArgsForCall)] + fake.forCertUpdateArgsForCall = append(fake.forCertUpdateArgsForCall, struct { + arg1 common.SecretType + arg2 v1.Object + }{arg1, arg2}) + stub := fake.ForCertUpdateStub + fakeReturns := fake.forCertUpdateReturns + fake.recordInvocation("ForCertUpdate", []interface{}{arg1, arg2}) + fake.forCertUpdateMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *RestartManager) ForCertUpdateCallCount() int { + fake.forCertUpdateMutex.RLock() + defer fake.forCertUpdateMutex.RUnlock() + return len(fake.forCertUpdateArgsForCall) +} + +func (fake *RestartManager) ForCertUpdateCalls(stub func(common.SecretType, v1.Object) error) { + fake.forCertUpdateMutex.Lock() + defer fake.forCertUpdateMutex.Unlock() + fake.ForCertUpdateStub = stub +} + +func (fake *RestartManager) ForCertUpdateArgsForCall(i int) (common.SecretType, v1.Object) { + fake.forCertUpdateMutex.RLock() + defer fake.forCertUpdateMutex.RUnlock() + argsForCall := fake.forCertUpdateArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *RestartManager) ForCertUpdateReturns(result1 error) { + fake.forCertUpdateMutex.Lock() + defer fake.forCertUpdateMutex.Unlock() + fake.ForCertUpdateStub = nil + fake.forCertUpdateReturns = struct { + result1 error + }{result1} +} + +func (fake *RestartManager) ForCertUpdateReturnsOnCall(i int, result1 error) { + fake.forCertUpdateMutex.Lock() + defer fake.forCertUpdateMutex.Unlock() + fake.ForCertUpdateStub = nil + if fake.forCertUpdateReturnsOnCall == nil { + fake.forCertUpdateReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.forCertUpdateReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *RestartManager) ForConfigOverride(arg1 v1.Object) error { + fake.forConfigOverrideMutex.Lock() + ret, specificReturn := fake.forConfigOverrideReturnsOnCall[len(fake.forConfigOverrideArgsForCall)] + fake.forConfigOverrideArgsForCall = append(fake.forConfigOverrideArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.ForConfigOverrideStub + fakeReturns := fake.forConfigOverrideReturns + fake.recordInvocation("ForConfigOverride", []interface{}{arg1}) + fake.forConfigOverrideMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *RestartManager) ForConfigOverrideCallCount() int { + fake.forConfigOverrideMutex.RLock() + defer fake.forConfigOverrideMutex.RUnlock() + return len(fake.forConfigOverrideArgsForCall) +} + +func (fake *RestartManager) ForConfigOverrideCalls(stub func(v1.Object) error) { + fake.forConfigOverrideMutex.Lock() + defer fake.forConfigOverrideMutex.Unlock() + fake.ForConfigOverrideStub = stub +} + +func (fake *RestartManager) 
ForConfigOverrideArgsForCall(i int) v1.Object { + fake.forConfigOverrideMutex.RLock() + defer fake.forConfigOverrideMutex.RUnlock() + argsForCall := fake.forConfigOverrideArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *RestartManager) ForConfigOverrideReturns(result1 error) { + fake.forConfigOverrideMutex.Lock() + defer fake.forConfigOverrideMutex.Unlock() + fake.ForConfigOverrideStub = nil + fake.forConfigOverrideReturns = struct { + result1 error + }{result1} +} + +func (fake *RestartManager) ForConfigOverrideReturnsOnCall(i int, result1 error) { + fake.forConfigOverrideMutex.Lock() + defer fake.forConfigOverrideMutex.Unlock() + fake.ForConfigOverrideStub = nil + if fake.forConfigOverrideReturnsOnCall == nil { + fake.forConfigOverrideReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.forConfigOverrideReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *RestartManager) ForNodeOU(arg1 v1.Object) error { + fake.forNodeOUMutex.Lock() + ret, specificReturn := fake.forNodeOUReturnsOnCall[len(fake.forNodeOUArgsForCall)] + fake.forNodeOUArgsForCall = append(fake.forNodeOUArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.ForNodeOUStub + fakeReturns := fake.forNodeOUReturns + fake.recordInvocation("ForNodeOU", []interface{}{arg1}) + fake.forNodeOUMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *RestartManager) ForNodeOUCallCount() int { + fake.forNodeOUMutex.RLock() + defer fake.forNodeOUMutex.RUnlock() + return len(fake.forNodeOUArgsForCall) +} + +func (fake *RestartManager) ForNodeOUCalls(stub func(v1.Object) error) { + fake.forNodeOUMutex.Lock() + defer fake.forNodeOUMutex.Unlock() + fake.ForNodeOUStub = stub +} + +func (fake *RestartManager) ForNodeOUArgsForCall(i int) v1.Object { + fake.forNodeOUMutex.RLock() + defer fake.forNodeOUMutex.RUnlock() + argsForCall := fake.forNodeOUArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *RestartManager) ForNodeOUReturns(result1 error) { + fake.forNodeOUMutex.Lock() + defer fake.forNodeOUMutex.Unlock() + fake.ForNodeOUStub = nil + fake.forNodeOUReturns = struct { + result1 error + }{result1} +} + +func (fake *RestartManager) ForNodeOUReturnsOnCall(i int, result1 error) { + fake.forNodeOUMutex.Lock() + defer fake.forNodeOUMutex.Unlock() + fake.ForNodeOUStub = nil + if fake.forNodeOUReturnsOnCall == nil { + fake.forNodeOUReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.forNodeOUReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *RestartManager) ForRestartAction(arg1 v1.Object) error { + fake.forRestartActionMutex.Lock() + ret, specificReturn := fake.forRestartActionReturnsOnCall[len(fake.forRestartActionArgsForCall)] + fake.forRestartActionArgsForCall = append(fake.forRestartActionArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.ForRestartActionStub + fakeReturns := fake.forRestartActionReturns + fake.recordInvocation("ForRestartAction", []interface{}{arg1}) + fake.forRestartActionMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *RestartManager) ForRestartActionCallCount() int { + fake.forRestartActionMutex.RLock() + defer fake.forRestartActionMutex.RUnlock() + return len(fake.forRestartActionArgsForCall) +} + +func (fake *RestartManager) ForRestartActionCalls(stub func(v1.Object) error) { + 
fake.forRestartActionMutex.Lock() + defer fake.forRestartActionMutex.Unlock() + fake.ForRestartActionStub = stub +} + +func (fake *RestartManager) ForRestartActionArgsForCall(i int) v1.Object { + fake.forRestartActionMutex.RLock() + defer fake.forRestartActionMutex.RUnlock() + argsForCall := fake.forRestartActionArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *RestartManager) ForRestartActionReturns(result1 error) { + fake.forRestartActionMutex.Lock() + defer fake.forRestartActionMutex.Unlock() + fake.ForRestartActionStub = nil + fake.forRestartActionReturns = struct { + result1 error + }{result1} +} + +func (fake *RestartManager) ForRestartActionReturnsOnCall(i int, result1 error) { + fake.forRestartActionMutex.Lock() + defer fake.forRestartActionMutex.Unlock() + fake.ForRestartActionStub = nil + if fake.forRestartActionReturnsOnCall == nil { + fake.forRestartActionReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.forRestartActionReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *RestartManager) TriggerIfNeeded(arg1 restart.Instance) error { + fake.triggerIfNeededMutex.Lock() + ret, specificReturn := fake.triggerIfNeededReturnsOnCall[len(fake.triggerIfNeededArgsForCall)] + fake.triggerIfNeededArgsForCall = append(fake.triggerIfNeededArgsForCall, struct { + arg1 restart.Instance + }{arg1}) + stub := fake.TriggerIfNeededStub + fakeReturns := fake.triggerIfNeededReturns + fake.recordInvocation("TriggerIfNeeded", []interface{}{arg1}) + fake.triggerIfNeededMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *RestartManager) TriggerIfNeededCallCount() int { + fake.triggerIfNeededMutex.RLock() + defer fake.triggerIfNeededMutex.RUnlock() + return len(fake.triggerIfNeededArgsForCall) +} + +func (fake *RestartManager) TriggerIfNeededCalls(stub func(restart.Instance) error) { + fake.triggerIfNeededMutex.Lock() + defer fake.triggerIfNeededMutex.Unlock() + fake.TriggerIfNeededStub = stub +} + +func (fake *RestartManager) TriggerIfNeededArgsForCall(i int) restart.Instance { + fake.triggerIfNeededMutex.RLock() + defer fake.triggerIfNeededMutex.RUnlock() + argsForCall := fake.triggerIfNeededArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *RestartManager) TriggerIfNeededReturns(result1 error) { + fake.triggerIfNeededMutex.Lock() + defer fake.triggerIfNeededMutex.Unlock() + fake.TriggerIfNeededStub = nil + fake.triggerIfNeededReturns = struct { + result1 error + }{result1} +} + +func (fake *RestartManager) TriggerIfNeededReturnsOnCall(i int, result1 error) { + fake.triggerIfNeededMutex.Lock() + defer fake.triggerIfNeededMutex.Unlock() + fake.TriggerIfNeededStub = nil + if fake.triggerIfNeededReturnsOnCall == nil { + fake.triggerIfNeededReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.triggerIfNeededReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *RestartManager) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.forAdminCertUpdateMutex.RLock() + defer fake.forAdminCertUpdateMutex.RUnlock() + fake.forCertUpdateMutex.RLock() + defer fake.forCertUpdateMutex.RUnlock() + fake.forConfigOverrideMutex.RLock() + defer fake.forConfigOverrideMutex.RUnlock() + fake.forNodeOUMutex.RLock() + defer fake.forNodeOUMutex.RUnlock() + fake.forRestartActionMutex.RLock() + defer fake.forRestartActionMutex.RUnlock() + fake.triggerIfNeededMutex.RLock() + 
defer fake.triggerIfNeededMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *RestartManager) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ baseorderer.RestartManager = new(RestartManager) diff --git a/pkg/offering/base/orderer/mocks/update.go b/pkg/offering/base/orderer/mocks/update.go new file mode 100644 index 00000000..8b34c2cf --- /dev/null +++ b/pkg/offering/base/orderer/mocks/update.go @@ -0,0 +1,1533 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package mocks + +import ( + "sync" + + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common" + baseorderer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer" +) + +type Update struct { + CertificateCreatedStub func() bool + certificateCreatedMutex sync.RWMutex + certificateCreatedArgsForCall []struct { + } + certificateCreatedReturns struct { + result1 bool + } + certificateCreatedReturnsOnCall map[int]struct { + result1 bool + } + CertificateUpdatedStub func() bool + certificateUpdatedMutex sync.RWMutex + certificateUpdatedArgsForCall []struct { + } + certificateUpdatedReturns struct { + result1 bool + } + certificateUpdatedReturnsOnCall map[int]struct { + result1 bool + } + ConfigOverridesUpdatedStub func() bool + configOverridesUpdatedMutex sync.RWMutex + configOverridesUpdatedArgsForCall []struct { + } + configOverridesUpdatedReturns struct { + result1 bool + } + configOverridesUpdatedReturnsOnCall map[int]struct { + result1 bool + } + CryptoBackupNeededStub func() bool + cryptoBackupNeededMutex sync.RWMutex + cryptoBackupNeededArgsForCall []struct { + } + cryptoBackupNeededReturns struct { + result1 bool + } + cryptoBackupNeededReturnsOnCall map[int]struct { + result1 bool + } + DeploymentUpdatedStub func() bool + deploymentUpdatedMutex sync.RWMutex + deploymentUpdatedArgsForCall []struct { + } + deploymentUpdatedReturns struct { + result1 bool + } + deploymentUpdatedReturnsOnCall map[int]struct { + result1 bool + } + EcertEnrollStub func() bool + ecertEnrollMutex sync.RWMutex + ecertEnrollArgsForCall []struct { + } + ecertEnrollReturns struct { + result1 bool + } + ecertEnrollReturnsOnCall map[int]struct { + result1 bool + } + EcertNewKeyReenrollStub func() bool + ecertNewKeyReenrollMutex sync.RWMutex + ecertNewKeyReenrollArgsForCall []struct { + } + ecertNewKeyReenrollReturns struct { + result1 bool + } + ecertNewKeyReenrollReturnsOnCall map[int]struct { + result1 bool + } + EcertReenrollNeededStub func() bool + ecertReenrollNeededMutex sync.RWMutex + ecertReenrollNeededArgsForCall []struct { + } + ecertReenrollNeededReturns struct { + result1 bool + } + ecertReenrollNeededReturnsOnCall map[int]struct { + result1 bool + } + EcertUpdatedStub func() bool + ecertUpdatedMutex sync.RWMutex + ecertUpdatedArgsForCall []struct { + } + ecertUpdatedReturns struct { + result1 bool + } + ecertUpdatedReturnsOnCall map[int]struct { + result1 bool + } + FabricVersionUpdatedStub func() bool + fabricVersionUpdatedMutex sync.RWMutex + fabricVersionUpdatedArgsForCall []struct { + } + fabricVersionUpdatedReturns struct { + result1 bool + } + 
fabricVersionUpdatedReturnsOnCall map[int]struct { + result1 bool + } + GetCreatedCertTypeStub func() common.SecretType + getCreatedCertTypeMutex sync.RWMutex + getCreatedCertTypeArgsForCall []struct { + } + getCreatedCertTypeReturns struct { + result1 common.SecretType + } + getCreatedCertTypeReturnsOnCall map[int]struct { + result1 common.SecretType + } + ImagesUpdatedStub func() bool + imagesUpdatedMutex sync.RWMutex + imagesUpdatedArgsForCall []struct { + } + imagesUpdatedReturns struct { + result1 bool + } + imagesUpdatedReturnsOnCall map[int]struct { + result1 bool + } + MSPUpdatedStub func() bool + mSPUpdatedMutex sync.RWMutex + mSPUpdatedArgsForCall []struct { + } + mSPUpdatedReturns struct { + result1 bool + } + mSPUpdatedReturnsOnCall map[int]struct { + result1 bool + } + MigrateToV2Stub func() bool + migrateToV2Mutex sync.RWMutex + migrateToV2ArgsForCall []struct { + } + migrateToV2Returns struct { + result1 bool + } + migrateToV2ReturnsOnCall map[int]struct { + result1 bool + } + MigrateToV24Stub func() bool + migrateToV24Mutex sync.RWMutex + migrateToV24ArgsForCall []struct { + } + migrateToV24Returns struct { + result1 bool + } + migrateToV24ReturnsOnCall map[int]struct { + result1 bool + } + NodeOUUpdatedStub func() bool + nodeOUUpdatedMutex sync.RWMutex + nodeOUUpdatedArgsForCall []struct { + } + nodeOUUpdatedReturns struct { + result1 bool + } + nodeOUUpdatedReturnsOnCall map[int]struct { + result1 bool + } + OrdererTagUpdatedStub func() bool + ordererTagUpdatedMutex sync.RWMutex + ordererTagUpdatedArgsForCall []struct { + } + ordererTagUpdatedReturns struct { + result1 bool + } + ordererTagUpdatedReturnsOnCall map[int]struct { + result1 bool + } + RestartNeededStub func() bool + restartNeededMutex sync.RWMutex + restartNeededArgsForCall []struct { + } + restartNeededReturns struct { + result1 bool + } + restartNeededReturnsOnCall map[int]struct { + result1 bool + } + SpecUpdatedStub func() bool + specUpdatedMutex sync.RWMutex + specUpdatedArgsForCall []struct { + } + specUpdatedReturns struct { + result1 bool + } + specUpdatedReturnsOnCall map[int]struct { + result1 bool + } + TLSCertUpdatedStub func() bool + tLSCertUpdatedMutex sync.RWMutex + tLSCertUpdatedArgsForCall []struct { + } + tLSCertUpdatedReturns struct { + result1 bool + } + tLSCertUpdatedReturnsOnCall map[int]struct { + result1 bool + } + TLScertEnrollStub func() bool + tLScertEnrollMutex sync.RWMutex + tLScertEnrollArgsForCall []struct { + } + tLScertEnrollReturns struct { + result1 bool + } + tLScertEnrollReturnsOnCall map[int]struct { + result1 bool + } + TLScertNewKeyReenrollStub func() bool + tLScertNewKeyReenrollMutex sync.RWMutex + tLScertNewKeyReenrollArgsForCall []struct { + } + tLScertNewKeyReenrollReturns struct { + result1 bool + } + tLScertNewKeyReenrollReturnsOnCall map[int]struct { + result1 bool + } + TLScertReenrollNeededStub func() bool + tLScertReenrollNeededMutex sync.RWMutex + tLScertReenrollNeededArgsForCall []struct { + } + tLScertReenrollNeededReturns struct { + result1 bool + } + tLScertReenrollNeededReturnsOnCall map[int]struct { + result1 bool + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *Update) CertificateCreated() bool { + fake.certificateCreatedMutex.Lock() + ret, specificReturn := fake.certificateCreatedReturnsOnCall[len(fake.certificateCreatedArgsForCall)] + fake.certificateCreatedArgsForCall = append(fake.certificateCreatedArgsForCall, struct { + }{}) + stub := fake.CertificateCreatedStub + fakeReturns := 
fake.certificateCreatedReturns + fake.recordInvocation("CertificateCreated", []interface{}{}) + fake.certificateCreatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) CertificateCreatedCallCount() int { + fake.certificateCreatedMutex.RLock() + defer fake.certificateCreatedMutex.RUnlock() + return len(fake.certificateCreatedArgsForCall) +} + +func (fake *Update) CertificateCreatedCalls(stub func() bool) { + fake.certificateCreatedMutex.Lock() + defer fake.certificateCreatedMutex.Unlock() + fake.CertificateCreatedStub = stub +} + +func (fake *Update) CertificateCreatedReturns(result1 bool) { + fake.certificateCreatedMutex.Lock() + defer fake.certificateCreatedMutex.Unlock() + fake.CertificateCreatedStub = nil + fake.certificateCreatedReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) CertificateCreatedReturnsOnCall(i int, result1 bool) { + fake.certificateCreatedMutex.Lock() + defer fake.certificateCreatedMutex.Unlock() + fake.CertificateCreatedStub = nil + if fake.certificateCreatedReturnsOnCall == nil { + fake.certificateCreatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.certificateCreatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) CertificateUpdated() bool { + fake.certificateUpdatedMutex.Lock() + ret, specificReturn := fake.certificateUpdatedReturnsOnCall[len(fake.certificateUpdatedArgsForCall)] + fake.certificateUpdatedArgsForCall = append(fake.certificateUpdatedArgsForCall, struct { + }{}) + stub := fake.CertificateUpdatedStub + fakeReturns := fake.certificateUpdatedReturns + fake.recordInvocation("CertificateUpdated", []interface{}{}) + fake.certificateUpdatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) CertificateUpdatedCallCount() int { + fake.certificateUpdatedMutex.RLock() + defer fake.certificateUpdatedMutex.RUnlock() + return len(fake.certificateUpdatedArgsForCall) +} + +func (fake *Update) CertificateUpdatedCalls(stub func() bool) { + fake.certificateUpdatedMutex.Lock() + defer fake.certificateUpdatedMutex.Unlock() + fake.CertificateUpdatedStub = stub +} + +func (fake *Update) CertificateUpdatedReturns(result1 bool) { + fake.certificateUpdatedMutex.Lock() + defer fake.certificateUpdatedMutex.Unlock() + fake.CertificateUpdatedStub = nil + fake.certificateUpdatedReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) CertificateUpdatedReturnsOnCall(i int, result1 bool) { + fake.certificateUpdatedMutex.Lock() + defer fake.certificateUpdatedMutex.Unlock() + fake.CertificateUpdatedStub = nil + if fake.certificateUpdatedReturnsOnCall == nil { + fake.certificateUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.certificateUpdatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) ConfigOverridesUpdated() bool { + fake.configOverridesUpdatedMutex.Lock() + ret, specificReturn := fake.configOverridesUpdatedReturnsOnCall[len(fake.configOverridesUpdatedArgsForCall)] + fake.configOverridesUpdatedArgsForCall = append(fake.configOverridesUpdatedArgsForCall, struct { + }{}) + stub := fake.ConfigOverridesUpdatedStub + fakeReturns := fake.configOverridesUpdatedReturns + fake.recordInvocation("ConfigOverridesUpdated", []interface{}{}) + fake.configOverridesUpdatedMutex.Unlock() + if stub != nil { + return stub() + } + if 
specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) ConfigOverridesUpdatedCallCount() int { + fake.configOverridesUpdatedMutex.RLock() + defer fake.configOverridesUpdatedMutex.RUnlock() + return len(fake.configOverridesUpdatedArgsForCall) +} + +func (fake *Update) ConfigOverridesUpdatedCalls(stub func() bool) { + fake.configOverridesUpdatedMutex.Lock() + defer fake.configOverridesUpdatedMutex.Unlock() + fake.ConfigOverridesUpdatedStub = stub +} + +func (fake *Update) ConfigOverridesUpdatedReturns(result1 bool) { + fake.configOverridesUpdatedMutex.Lock() + defer fake.configOverridesUpdatedMutex.Unlock() + fake.ConfigOverridesUpdatedStub = nil + fake.configOverridesUpdatedReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) ConfigOverridesUpdatedReturnsOnCall(i int, result1 bool) { + fake.configOverridesUpdatedMutex.Lock() + defer fake.configOverridesUpdatedMutex.Unlock() + fake.ConfigOverridesUpdatedStub = nil + if fake.configOverridesUpdatedReturnsOnCall == nil { + fake.configOverridesUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.configOverridesUpdatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) CryptoBackupNeeded() bool { + fake.cryptoBackupNeededMutex.Lock() + ret, specificReturn := fake.cryptoBackupNeededReturnsOnCall[len(fake.cryptoBackupNeededArgsForCall)] + fake.cryptoBackupNeededArgsForCall = append(fake.cryptoBackupNeededArgsForCall, struct { + }{}) + stub := fake.CryptoBackupNeededStub + fakeReturns := fake.cryptoBackupNeededReturns + fake.recordInvocation("CryptoBackupNeeded", []interface{}{}) + fake.cryptoBackupNeededMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) CryptoBackupNeededCallCount() int { + fake.cryptoBackupNeededMutex.RLock() + defer fake.cryptoBackupNeededMutex.RUnlock() + return len(fake.cryptoBackupNeededArgsForCall) +} + +func (fake *Update) CryptoBackupNeededCalls(stub func() bool) { + fake.cryptoBackupNeededMutex.Lock() + defer fake.cryptoBackupNeededMutex.Unlock() + fake.CryptoBackupNeededStub = stub +} + +func (fake *Update) CryptoBackupNeededReturns(result1 bool) { + fake.cryptoBackupNeededMutex.Lock() + defer fake.cryptoBackupNeededMutex.Unlock() + fake.CryptoBackupNeededStub = nil + fake.cryptoBackupNeededReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) CryptoBackupNeededReturnsOnCall(i int, result1 bool) { + fake.cryptoBackupNeededMutex.Lock() + defer fake.cryptoBackupNeededMutex.Unlock() + fake.CryptoBackupNeededStub = nil + if fake.cryptoBackupNeededReturnsOnCall == nil { + fake.cryptoBackupNeededReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.cryptoBackupNeededReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) DeploymentUpdated() bool { + fake.deploymentUpdatedMutex.Lock() + ret, specificReturn := fake.deploymentUpdatedReturnsOnCall[len(fake.deploymentUpdatedArgsForCall)] + fake.deploymentUpdatedArgsForCall = append(fake.deploymentUpdatedArgsForCall, struct { + }{}) + stub := fake.DeploymentUpdatedStub + fakeReturns := fake.deploymentUpdatedReturns + fake.recordInvocation("DeploymentUpdated", []interface{}{}) + fake.deploymentUpdatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) DeploymentUpdatedCallCount() int { + 
fake.deploymentUpdatedMutex.RLock() + defer fake.deploymentUpdatedMutex.RUnlock() + return len(fake.deploymentUpdatedArgsForCall) +} + +func (fake *Update) DeploymentUpdatedCalls(stub func() bool) { + fake.deploymentUpdatedMutex.Lock() + defer fake.deploymentUpdatedMutex.Unlock() + fake.DeploymentUpdatedStub = stub +} + +func (fake *Update) DeploymentUpdatedReturns(result1 bool) { + fake.deploymentUpdatedMutex.Lock() + defer fake.deploymentUpdatedMutex.Unlock() + fake.DeploymentUpdatedStub = nil + fake.deploymentUpdatedReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) DeploymentUpdatedReturnsOnCall(i int, result1 bool) { + fake.deploymentUpdatedMutex.Lock() + defer fake.deploymentUpdatedMutex.Unlock() + fake.DeploymentUpdatedStub = nil + if fake.deploymentUpdatedReturnsOnCall == nil { + fake.deploymentUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.deploymentUpdatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) EcertEnroll() bool { + fake.ecertEnrollMutex.Lock() + ret, specificReturn := fake.ecertEnrollReturnsOnCall[len(fake.ecertEnrollArgsForCall)] + fake.ecertEnrollArgsForCall = append(fake.ecertEnrollArgsForCall, struct { + }{}) + stub := fake.EcertEnrollStub + fakeReturns := fake.ecertEnrollReturns + fake.recordInvocation("EcertEnroll", []interface{}{}) + fake.ecertEnrollMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) EcertEnrollCallCount() int { + fake.ecertEnrollMutex.RLock() + defer fake.ecertEnrollMutex.RUnlock() + return len(fake.ecertEnrollArgsForCall) +} + +func (fake *Update) EcertEnrollCalls(stub func() bool) { + fake.ecertEnrollMutex.Lock() + defer fake.ecertEnrollMutex.Unlock() + fake.EcertEnrollStub = stub +} + +func (fake *Update) EcertEnrollReturns(result1 bool) { + fake.ecertEnrollMutex.Lock() + defer fake.ecertEnrollMutex.Unlock() + fake.EcertEnrollStub = nil + fake.ecertEnrollReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) EcertEnrollReturnsOnCall(i int, result1 bool) { + fake.ecertEnrollMutex.Lock() + defer fake.ecertEnrollMutex.Unlock() + fake.EcertEnrollStub = nil + if fake.ecertEnrollReturnsOnCall == nil { + fake.ecertEnrollReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.ecertEnrollReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) EcertNewKeyReenroll() bool { + fake.ecertNewKeyReenrollMutex.Lock() + ret, specificReturn := fake.ecertNewKeyReenrollReturnsOnCall[len(fake.ecertNewKeyReenrollArgsForCall)] + fake.ecertNewKeyReenrollArgsForCall = append(fake.ecertNewKeyReenrollArgsForCall, struct { + }{}) + stub := fake.EcertNewKeyReenrollStub + fakeReturns := fake.ecertNewKeyReenrollReturns + fake.recordInvocation("EcertNewKeyReenroll", []interface{}{}) + fake.ecertNewKeyReenrollMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) EcertNewKeyReenrollCallCount() int { + fake.ecertNewKeyReenrollMutex.RLock() + defer fake.ecertNewKeyReenrollMutex.RUnlock() + return len(fake.ecertNewKeyReenrollArgsForCall) +} + +func (fake *Update) EcertNewKeyReenrollCalls(stub func() bool) { + fake.ecertNewKeyReenrollMutex.Lock() + defer fake.ecertNewKeyReenrollMutex.Unlock() + fake.EcertNewKeyReenrollStub = stub +} + +func (fake *Update) EcertNewKeyReenrollReturns(result1 bool) { + fake.ecertNewKeyReenrollMutex.Lock() + 
defer fake.ecertNewKeyReenrollMutex.Unlock() + fake.EcertNewKeyReenrollStub = nil + fake.ecertNewKeyReenrollReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) EcertNewKeyReenrollReturnsOnCall(i int, result1 bool) { + fake.ecertNewKeyReenrollMutex.Lock() + defer fake.ecertNewKeyReenrollMutex.Unlock() + fake.EcertNewKeyReenrollStub = nil + if fake.ecertNewKeyReenrollReturnsOnCall == nil { + fake.ecertNewKeyReenrollReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.ecertNewKeyReenrollReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) EcertReenrollNeeded() bool { + fake.ecertReenrollNeededMutex.Lock() + ret, specificReturn := fake.ecertReenrollNeededReturnsOnCall[len(fake.ecertReenrollNeededArgsForCall)] + fake.ecertReenrollNeededArgsForCall = append(fake.ecertReenrollNeededArgsForCall, struct { + }{}) + stub := fake.EcertReenrollNeededStub + fakeReturns := fake.ecertReenrollNeededReturns + fake.recordInvocation("EcertReenrollNeeded", []interface{}{}) + fake.ecertReenrollNeededMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) EcertReenrollNeededCallCount() int { + fake.ecertReenrollNeededMutex.RLock() + defer fake.ecertReenrollNeededMutex.RUnlock() + return len(fake.ecertReenrollNeededArgsForCall) +} + +func (fake *Update) EcertReenrollNeededCalls(stub func() bool) { + fake.ecertReenrollNeededMutex.Lock() + defer fake.ecertReenrollNeededMutex.Unlock() + fake.EcertReenrollNeededStub = stub +} + +func (fake *Update) EcertReenrollNeededReturns(result1 bool) { + fake.ecertReenrollNeededMutex.Lock() + defer fake.ecertReenrollNeededMutex.Unlock() + fake.EcertReenrollNeededStub = nil + fake.ecertReenrollNeededReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) EcertReenrollNeededReturnsOnCall(i int, result1 bool) { + fake.ecertReenrollNeededMutex.Lock() + defer fake.ecertReenrollNeededMutex.Unlock() + fake.EcertReenrollNeededStub = nil + if fake.ecertReenrollNeededReturnsOnCall == nil { + fake.ecertReenrollNeededReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.ecertReenrollNeededReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) EcertUpdated() bool { + fake.ecertUpdatedMutex.Lock() + ret, specificReturn := fake.ecertUpdatedReturnsOnCall[len(fake.ecertUpdatedArgsForCall)] + fake.ecertUpdatedArgsForCall = append(fake.ecertUpdatedArgsForCall, struct { + }{}) + stub := fake.EcertUpdatedStub + fakeReturns := fake.ecertUpdatedReturns + fake.recordInvocation("EcertUpdated", []interface{}{}) + fake.ecertUpdatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) EcertUpdatedCallCount() int { + fake.ecertUpdatedMutex.RLock() + defer fake.ecertUpdatedMutex.RUnlock() + return len(fake.ecertUpdatedArgsForCall) +} + +func (fake *Update) EcertUpdatedCalls(stub func() bool) { + fake.ecertUpdatedMutex.Lock() + defer fake.ecertUpdatedMutex.Unlock() + fake.EcertUpdatedStub = stub +} + +func (fake *Update) EcertUpdatedReturns(result1 bool) { + fake.ecertUpdatedMutex.Lock() + defer fake.ecertUpdatedMutex.Unlock() + fake.EcertUpdatedStub = nil + fake.ecertUpdatedReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) EcertUpdatedReturnsOnCall(i int, result1 bool) { + fake.ecertUpdatedMutex.Lock() + defer fake.ecertUpdatedMutex.Unlock() + fake.EcertUpdatedStub 
= nil + if fake.ecertUpdatedReturnsOnCall == nil { + fake.ecertUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.ecertUpdatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) FabricVersionUpdated() bool { + fake.fabricVersionUpdatedMutex.Lock() + ret, specificReturn := fake.fabricVersionUpdatedReturnsOnCall[len(fake.fabricVersionUpdatedArgsForCall)] + fake.fabricVersionUpdatedArgsForCall = append(fake.fabricVersionUpdatedArgsForCall, struct { + }{}) + stub := fake.FabricVersionUpdatedStub + fakeReturns := fake.fabricVersionUpdatedReturns + fake.recordInvocation("FabricVersionUpdated", []interface{}{}) + fake.fabricVersionUpdatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) FabricVersionUpdatedCallCount() int { + fake.fabricVersionUpdatedMutex.RLock() + defer fake.fabricVersionUpdatedMutex.RUnlock() + return len(fake.fabricVersionUpdatedArgsForCall) +} + +func (fake *Update) FabricVersionUpdatedCalls(stub func() bool) { + fake.fabricVersionUpdatedMutex.Lock() + defer fake.fabricVersionUpdatedMutex.Unlock() + fake.FabricVersionUpdatedStub = stub +} + +func (fake *Update) FabricVersionUpdatedReturns(result1 bool) { + fake.fabricVersionUpdatedMutex.Lock() + defer fake.fabricVersionUpdatedMutex.Unlock() + fake.FabricVersionUpdatedStub = nil + fake.fabricVersionUpdatedReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) FabricVersionUpdatedReturnsOnCall(i int, result1 bool) { + fake.fabricVersionUpdatedMutex.Lock() + defer fake.fabricVersionUpdatedMutex.Unlock() + fake.FabricVersionUpdatedStub = nil + if fake.fabricVersionUpdatedReturnsOnCall == nil { + fake.fabricVersionUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.fabricVersionUpdatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) GetCreatedCertType() common.SecretType { + fake.getCreatedCertTypeMutex.Lock() + ret, specificReturn := fake.getCreatedCertTypeReturnsOnCall[len(fake.getCreatedCertTypeArgsForCall)] + fake.getCreatedCertTypeArgsForCall = append(fake.getCreatedCertTypeArgsForCall, struct { + }{}) + stub := fake.GetCreatedCertTypeStub + fakeReturns := fake.getCreatedCertTypeReturns + fake.recordInvocation("GetCreatedCertType", []interface{}{}) + fake.getCreatedCertTypeMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) GetCreatedCertTypeCallCount() int { + fake.getCreatedCertTypeMutex.RLock() + defer fake.getCreatedCertTypeMutex.RUnlock() + return len(fake.getCreatedCertTypeArgsForCall) +} + +func (fake *Update) GetCreatedCertTypeCalls(stub func() common.SecretType) { + fake.getCreatedCertTypeMutex.Lock() + defer fake.getCreatedCertTypeMutex.Unlock() + fake.GetCreatedCertTypeStub = stub +} + +func (fake *Update) GetCreatedCertTypeReturns(result1 common.SecretType) { + fake.getCreatedCertTypeMutex.Lock() + defer fake.getCreatedCertTypeMutex.Unlock() + fake.GetCreatedCertTypeStub = nil + fake.getCreatedCertTypeReturns = struct { + result1 common.SecretType + }{result1} +} + +func (fake *Update) GetCreatedCertTypeReturnsOnCall(i int, result1 common.SecretType) { + fake.getCreatedCertTypeMutex.Lock() + defer fake.getCreatedCertTypeMutex.Unlock() + fake.GetCreatedCertTypeStub = nil + if fake.getCreatedCertTypeReturnsOnCall == nil { + fake.getCreatedCertTypeReturnsOnCall = make(map[int]struct 
{ + result1 common.SecretType + }) + } + fake.getCreatedCertTypeReturnsOnCall[i] = struct { + result1 common.SecretType + }{result1} +} + +func (fake *Update) ImagesUpdated() bool { + fake.imagesUpdatedMutex.Lock() + ret, specificReturn := fake.imagesUpdatedReturnsOnCall[len(fake.imagesUpdatedArgsForCall)] + fake.imagesUpdatedArgsForCall = append(fake.imagesUpdatedArgsForCall, struct { + }{}) + stub := fake.ImagesUpdatedStub + fakeReturns := fake.imagesUpdatedReturns + fake.recordInvocation("ImagesUpdated", []interface{}{}) + fake.imagesUpdatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) ImagesUpdatedCallCount() int { + fake.imagesUpdatedMutex.RLock() + defer fake.imagesUpdatedMutex.RUnlock() + return len(fake.imagesUpdatedArgsForCall) +} + +func (fake *Update) ImagesUpdatedCalls(stub func() bool) { + fake.imagesUpdatedMutex.Lock() + defer fake.imagesUpdatedMutex.Unlock() + fake.ImagesUpdatedStub = stub +} + +func (fake *Update) ImagesUpdatedReturns(result1 bool) { + fake.imagesUpdatedMutex.Lock() + defer fake.imagesUpdatedMutex.Unlock() + fake.ImagesUpdatedStub = nil + fake.imagesUpdatedReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) ImagesUpdatedReturnsOnCall(i int, result1 bool) { + fake.imagesUpdatedMutex.Lock() + defer fake.imagesUpdatedMutex.Unlock() + fake.ImagesUpdatedStub = nil + if fake.imagesUpdatedReturnsOnCall == nil { + fake.imagesUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.imagesUpdatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) MSPUpdated() bool { + fake.mSPUpdatedMutex.Lock() + ret, specificReturn := fake.mSPUpdatedReturnsOnCall[len(fake.mSPUpdatedArgsForCall)] + fake.mSPUpdatedArgsForCall = append(fake.mSPUpdatedArgsForCall, struct { + }{}) + stub := fake.MSPUpdatedStub + fakeReturns := fake.mSPUpdatedReturns + fake.recordInvocation("MSPUpdated", []interface{}{}) + fake.mSPUpdatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) MSPUpdatedCallCount() int { + fake.mSPUpdatedMutex.RLock() + defer fake.mSPUpdatedMutex.RUnlock() + return len(fake.mSPUpdatedArgsForCall) +} + +func (fake *Update) MSPUpdatedCalls(stub func() bool) { + fake.mSPUpdatedMutex.Lock() + defer fake.mSPUpdatedMutex.Unlock() + fake.MSPUpdatedStub = stub +} + +func (fake *Update) MSPUpdatedReturns(result1 bool) { + fake.mSPUpdatedMutex.Lock() + defer fake.mSPUpdatedMutex.Unlock() + fake.MSPUpdatedStub = nil + fake.mSPUpdatedReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) MSPUpdatedReturnsOnCall(i int, result1 bool) { + fake.mSPUpdatedMutex.Lock() + defer fake.mSPUpdatedMutex.Unlock() + fake.MSPUpdatedStub = nil + if fake.mSPUpdatedReturnsOnCall == nil { + fake.mSPUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.mSPUpdatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) MigrateToV2() bool { + fake.migrateToV2Mutex.Lock() + ret, specificReturn := fake.migrateToV2ReturnsOnCall[len(fake.migrateToV2ArgsForCall)] + fake.migrateToV2ArgsForCall = append(fake.migrateToV2ArgsForCall, struct { + }{}) + stub := fake.MigrateToV2Stub + fakeReturns := fake.migrateToV2Returns + fake.recordInvocation("MigrateToV2", []interface{}{}) + fake.migrateToV2Mutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return 
ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) MigrateToV2CallCount() int { + fake.migrateToV2Mutex.RLock() + defer fake.migrateToV2Mutex.RUnlock() + return len(fake.migrateToV2ArgsForCall) +} + +func (fake *Update) MigrateToV2Calls(stub func() bool) { + fake.migrateToV2Mutex.Lock() + defer fake.migrateToV2Mutex.Unlock() + fake.MigrateToV2Stub = stub +} + +func (fake *Update) MigrateToV2Returns(result1 bool) { + fake.migrateToV2Mutex.Lock() + defer fake.migrateToV2Mutex.Unlock() + fake.MigrateToV2Stub = nil + fake.migrateToV2Returns = struct { + result1 bool + }{result1} +} + +func (fake *Update) MigrateToV2ReturnsOnCall(i int, result1 bool) { + fake.migrateToV2Mutex.Lock() + defer fake.migrateToV2Mutex.Unlock() + fake.MigrateToV2Stub = nil + if fake.migrateToV2ReturnsOnCall == nil { + fake.migrateToV2ReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.migrateToV2ReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) MigrateToV24() bool { + fake.migrateToV24Mutex.Lock() + ret, specificReturn := fake.migrateToV24ReturnsOnCall[len(fake.migrateToV24ArgsForCall)] + fake.migrateToV24ArgsForCall = append(fake.migrateToV24ArgsForCall, struct { + }{}) + stub := fake.MigrateToV24Stub + fakeReturns := fake.migrateToV24Returns + fake.recordInvocation("MigrateToV24", []interface{}{}) + fake.migrateToV24Mutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) MigrateToV24CallCount() int { + fake.migrateToV24Mutex.RLock() + defer fake.migrateToV24Mutex.RUnlock() + return len(fake.migrateToV24ArgsForCall) +} + +func (fake *Update) MigrateToV24Calls(stub func() bool) { + fake.migrateToV24Mutex.Lock() + defer fake.migrateToV24Mutex.Unlock() + fake.MigrateToV24Stub = stub +} + +func (fake *Update) MigrateToV24Returns(result1 bool) { + fake.migrateToV24Mutex.Lock() + defer fake.migrateToV24Mutex.Unlock() + fake.MigrateToV24Stub = nil + fake.migrateToV24Returns = struct { + result1 bool + }{result1} +} + +func (fake *Update) MigrateToV24ReturnsOnCall(i int, result1 bool) { + fake.migrateToV24Mutex.Lock() + defer fake.migrateToV24Mutex.Unlock() + fake.MigrateToV24Stub = nil + if fake.migrateToV24ReturnsOnCall == nil { + fake.migrateToV24ReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.migrateToV24ReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) NodeOUUpdated() bool { + fake.nodeOUUpdatedMutex.Lock() + ret, specificReturn := fake.nodeOUUpdatedReturnsOnCall[len(fake.nodeOUUpdatedArgsForCall)] + fake.nodeOUUpdatedArgsForCall = append(fake.nodeOUUpdatedArgsForCall, struct { + }{}) + stub := fake.NodeOUUpdatedStub + fakeReturns := fake.nodeOUUpdatedReturns + fake.recordInvocation("NodeOUUpdated", []interface{}{}) + fake.nodeOUUpdatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) NodeOUUpdatedCallCount() int { + fake.nodeOUUpdatedMutex.RLock() + defer fake.nodeOUUpdatedMutex.RUnlock() + return len(fake.nodeOUUpdatedArgsForCall) +} + +func (fake *Update) NodeOUUpdatedCalls(stub func() bool) { + fake.nodeOUUpdatedMutex.Lock() + defer fake.nodeOUUpdatedMutex.Unlock() + fake.NodeOUUpdatedStub = stub +} + +func (fake *Update) NodeOUUpdatedReturns(result1 bool) { + fake.nodeOUUpdatedMutex.Lock() + defer fake.nodeOUUpdatedMutex.Unlock() + fake.NodeOUUpdatedStub = nil + fake.nodeOUUpdatedReturns = 
struct { + result1 bool + }{result1} +} + +func (fake *Update) NodeOUUpdatedReturnsOnCall(i int, result1 bool) { + fake.nodeOUUpdatedMutex.Lock() + defer fake.nodeOUUpdatedMutex.Unlock() + fake.NodeOUUpdatedStub = nil + if fake.nodeOUUpdatedReturnsOnCall == nil { + fake.nodeOUUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.nodeOUUpdatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) OrdererTagUpdated() bool { + fake.ordererTagUpdatedMutex.Lock() + ret, specificReturn := fake.ordererTagUpdatedReturnsOnCall[len(fake.ordererTagUpdatedArgsForCall)] + fake.ordererTagUpdatedArgsForCall = append(fake.ordererTagUpdatedArgsForCall, struct { + }{}) + stub := fake.OrdererTagUpdatedStub + fakeReturns := fake.ordererTagUpdatedReturns + fake.recordInvocation("OrdererTagUpdated", []interface{}{}) + fake.ordererTagUpdatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) OrdererTagUpdatedCallCount() int { + fake.ordererTagUpdatedMutex.RLock() + defer fake.ordererTagUpdatedMutex.RUnlock() + return len(fake.ordererTagUpdatedArgsForCall) +} + +func (fake *Update) OrdererTagUpdatedCalls(stub func() bool) { + fake.ordererTagUpdatedMutex.Lock() + defer fake.ordererTagUpdatedMutex.Unlock() + fake.OrdererTagUpdatedStub = stub +} + +func (fake *Update) OrdererTagUpdatedReturns(result1 bool) { + fake.ordererTagUpdatedMutex.Lock() + defer fake.ordererTagUpdatedMutex.Unlock() + fake.OrdererTagUpdatedStub = nil + fake.ordererTagUpdatedReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) OrdererTagUpdatedReturnsOnCall(i int, result1 bool) { + fake.ordererTagUpdatedMutex.Lock() + defer fake.ordererTagUpdatedMutex.Unlock() + fake.OrdererTagUpdatedStub = nil + if fake.ordererTagUpdatedReturnsOnCall == nil { + fake.ordererTagUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.ordererTagUpdatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) RestartNeeded() bool { + fake.restartNeededMutex.Lock() + ret, specificReturn := fake.restartNeededReturnsOnCall[len(fake.restartNeededArgsForCall)] + fake.restartNeededArgsForCall = append(fake.restartNeededArgsForCall, struct { + }{}) + stub := fake.RestartNeededStub + fakeReturns := fake.restartNeededReturns + fake.recordInvocation("RestartNeeded", []interface{}{}) + fake.restartNeededMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) RestartNeededCallCount() int { + fake.restartNeededMutex.RLock() + defer fake.restartNeededMutex.RUnlock() + return len(fake.restartNeededArgsForCall) +} + +func (fake *Update) RestartNeededCalls(stub func() bool) { + fake.restartNeededMutex.Lock() + defer fake.restartNeededMutex.Unlock() + fake.RestartNeededStub = stub +} + +func (fake *Update) RestartNeededReturns(result1 bool) { + fake.restartNeededMutex.Lock() + defer fake.restartNeededMutex.Unlock() + fake.RestartNeededStub = nil + fake.restartNeededReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) RestartNeededReturnsOnCall(i int, result1 bool) { + fake.restartNeededMutex.Lock() + defer fake.restartNeededMutex.Unlock() + fake.RestartNeededStub = nil + if fake.restartNeededReturnsOnCall == nil { + fake.restartNeededReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.restartNeededReturnsOnCall[i] = struct { + result1 
bool + }{result1} +} + +func (fake *Update) SpecUpdated() bool { + fake.specUpdatedMutex.Lock() + ret, specificReturn := fake.specUpdatedReturnsOnCall[len(fake.specUpdatedArgsForCall)] + fake.specUpdatedArgsForCall = append(fake.specUpdatedArgsForCall, struct { + }{}) + stub := fake.SpecUpdatedStub + fakeReturns := fake.specUpdatedReturns + fake.recordInvocation("SpecUpdated", []interface{}{}) + fake.specUpdatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) SpecUpdatedCallCount() int { + fake.specUpdatedMutex.RLock() + defer fake.specUpdatedMutex.RUnlock() + return len(fake.specUpdatedArgsForCall) +} + +func (fake *Update) SpecUpdatedCalls(stub func() bool) { + fake.specUpdatedMutex.Lock() + defer fake.specUpdatedMutex.Unlock() + fake.SpecUpdatedStub = stub +} + +func (fake *Update) SpecUpdatedReturns(result1 bool) { + fake.specUpdatedMutex.Lock() + defer fake.specUpdatedMutex.Unlock() + fake.SpecUpdatedStub = nil + fake.specUpdatedReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) SpecUpdatedReturnsOnCall(i int, result1 bool) { + fake.specUpdatedMutex.Lock() + defer fake.specUpdatedMutex.Unlock() + fake.SpecUpdatedStub = nil + if fake.specUpdatedReturnsOnCall == nil { + fake.specUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.specUpdatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) TLSCertUpdated() bool { + fake.tLSCertUpdatedMutex.Lock() + ret, specificReturn := fake.tLSCertUpdatedReturnsOnCall[len(fake.tLSCertUpdatedArgsForCall)] + fake.tLSCertUpdatedArgsForCall = append(fake.tLSCertUpdatedArgsForCall, struct { + }{}) + stub := fake.TLSCertUpdatedStub + fakeReturns := fake.tLSCertUpdatedReturns + fake.recordInvocation("TLSCertUpdated", []interface{}{}) + fake.tLSCertUpdatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) TLSCertUpdatedCallCount() int { + fake.tLSCertUpdatedMutex.RLock() + defer fake.tLSCertUpdatedMutex.RUnlock() + return len(fake.tLSCertUpdatedArgsForCall) +} + +func (fake *Update) TLSCertUpdatedCalls(stub func() bool) { + fake.tLSCertUpdatedMutex.Lock() + defer fake.tLSCertUpdatedMutex.Unlock() + fake.TLSCertUpdatedStub = stub +} + +func (fake *Update) TLSCertUpdatedReturns(result1 bool) { + fake.tLSCertUpdatedMutex.Lock() + defer fake.tLSCertUpdatedMutex.Unlock() + fake.TLSCertUpdatedStub = nil + fake.tLSCertUpdatedReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) TLSCertUpdatedReturnsOnCall(i int, result1 bool) { + fake.tLSCertUpdatedMutex.Lock() + defer fake.tLSCertUpdatedMutex.Unlock() + fake.TLSCertUpdatedStub = nil + if fake.tLSCertUpdatedReturnsOnCall == nil { + fake.tLSCertUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.tLSCertUpdatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) TLScertEnroll() bool { + fake.tLScertEnrollMutex.Lock() + ret, specificReturn := fake.tLScertEnrollReturnsOnCall[len(fake.tLScertEnrollArgsForCall)] + fake.tLScertEnrollArgsForCall = append(fake.tLScertEnrollArgsForCall, struct { + }{}) + stub := fake.TLScertEnrollStub + fakeReturns := fake.tLScertEnrollReturns + fake.recordInvocation("TLScertEnroll", []interface{}{}) + fake.tLScertEnrollMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return 
fakeReturns.result1 +} + +func (fake *Update) TLScertEnrollCallCount() int { + fake.tLScertEnrollMutex.RLock() + defer fake.tLScertEnrollMutex.RUnlock() + return len(fake.tLScertEnrollArgsForCall) +} + +func (fake *Update) TLScertEnrollCalls(stub func() bool) { + fake.tLScertEnrollMutex.Lock() + defer fake.tLScertEnrollMutex.Unlock() + fake.TLScertEnrollStub = stub +} + +func (fake *Update) TLScertEnrollReturns(result1 bool) { + fake.tLScertEnrollMutex.Lock() + defer fake.tLScertEnrollMutex.Unlock() + fake.TLScertEnrollStub = nil + fake.tLScertEnrollReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) TLScertEnrollReturnsOnCall(i int, result1 bool) { + fake.tLScertEnrollMutex.Lock() + defer fake.tLScertEnrollMutex.Unlock() + fake.TLScertEnrollStub = nil + if fake.tLScertEnrollReturnsOnCall == nil { + fake.tLScertEnrollReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.tLScertEnrollReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) TLScertNewKeyReenroll() bool { + fake.tLScertNewKeyReenrollMutex.Lock() + ret, specificReturn := fake.tLScertNewKeyReenrollReturnsOnCall[len(fake.tLScertNewKeyReenrollArgsForCall)] + fake.tLScertNewKeyReenrollArgsForCall = append(fake.tLScertNewKeyReenrollArgsForCall, struct { + }{}) + stub := fake.TLScertNewKeyReenrollStub + fakeReturns := fake.tLScertNewKeyReenrollReturns + fake.recordInvocation("TLScertNewKeyReenroll", []interface{}{}) + fake.tLScertNewKeyReenrollMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) TLScertNewKeyReenrollCallCount() int { + fake.tLScertNewKeyReenrollMutex.RLock() + defer fake.tLScertNewKeyReenrollMutex.RUnlock() + return len(fake.tLScertNewKeyReenrollArgsForCall) +} + +func (fake *Update) TLScertNewKeyReenrollCalls(stub func() bool) { + fake.tLScertNewKeyReenrollMutex.Lock() + defer fake.tLScertNewKeyReenrollMutex.Unlock() + fake.TLScertNewKeyReenrollStub = stub +} + +func (fake *Update) TLScertNewKeyReenrollReturns(result1 bool) { + fake.tLScertNewKeyReenrollMutex.Lock() + defer fake.tLScertNewKeyReenrollMutex.Unlock() + fake.TLScertNewKeyReenrollStub = nil + fake.tLScertNewKeyReenrollReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) TLScertNewKeyReenrollReturnsOnCall(i int, result1 bool) { + fake.tLScertNewKeyReenrollMutex.Lock() + defer fake.tLScertNewKeyReenrollMutex.Unlock() + fake.TLScertNewKeyReenrollStub = nil + if fake.tLScertNewKeyReenrollReturnsOnCall == nil { + fake.tLScertNewKeyReenrollReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.tLScertNewKeyReenrollReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) TLScertReenrollNeeded() bool { + fake.tLScertReenrollNeededMutex.Lock() + ret, specificReturn := fake.tLScertReenrollNeededReturnsOnCall[len(fake.tLScertReenrollNeededArgsForCall)] + fake.tLScertReenrollNeededArgsForCall = append(fake.tLScertReenrollNeededArgsForCall, struct { + }{}) + stub := fake.TLScertReenrollNeededStub + fakeReturns := fake.tLScertReenrollNeededReturns + fake.recordInvocation("TLScertReenrollNeeded", []interface{}{}) + fake.tLScertReenrollNeededMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) TLScertReenrollNeededCallCount() int { + fake.tLScertReenrollNeededMutex.RLock() + defer fake.tLScertReenrollNeededMutex.RUnlock() + return 
len(fake.tLScertReenrollNeededArgsForCall) +} + +func (fake *Update) TLScertReenrollNeededCalls(stub func() bool) { + fake.tLScertReenrollNeededMutex.Lock() + defer fake.tLScertReenrollNeededMutex.Unlock() + fake.TLScertReenrollNeededStub = stub +} + +func (fake *Update) TLScertReenrollNeededReturns(result1 bool) { + fake.tLScertReenrollNeededMutex.Lock() + defer fake.tLScertReenrollNeededMutex.Unlock() + fake.TLScertReenrollNeededStub = nil + fake.tLScertReenrollNeededReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) TLScertReenrollNeededReturnsOnCall(i int, result1 bool) { + fake.tLScertReenrollNeededMutex.Lock() + defer fake.tLScertReenrollNeededMutex.Unlock() + fake.TLScertReenrollNeededStub = nil + if fake.tLScertReenrollNeededReturnsOnCall == nil { + fake.tLScertReenrollNeededReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.tLScertReenrollNeededReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.certificateCreatedMutex.RLock() + defer fake.certificateCreatedMutex.RUnlock() + fake.certificateUpdatedMutex.RLock() + defer fake.certificateUpdatedMutex.RUnlock() + fake.configOverridesUpdatedMutex.RLock() + defer fake.configOverridesUpdatedMutex.RUnlock() + fake.cryptoBackupNeededMutex.RLock() + defer fake.cryptoBackupNeededMutex.RUnlock() + fake.deploymentUpdatedMutex.RLock() + defer fake.deploymentUpdatedMutex.RUnlock() + fake.ecertEnrollMutex.RLock() + defer fake.ecertEnrollMutex.RUnlock() + fake.ecertNewKeyReenrollMutex.RLock() + defer fake.ecertNewKeyReenrollMutex.RUnlock() + fake.ecertReenrollNeededMutex.RLock() + defer fake.ecertReenrollNeededMutex.RUnlock() + fake.ecertUpdatedMutex.RLock() + defer fake.ecertUpdatedMutex.RUnlock() + fake.fabricVersionUpdatedMutex.RLock() + defer fake.fabricVersionUpdatedMutex.RUnlock() + fake.getCreatedCertTypeMutex.RLock() + defer fake.getCreatedCertTypeMutex.RUnlock() + fake.imagesUpdatedMutex.RLock() + defer fake.imagesUpdatedMutex.RUnlock() + fake.mSPUpdatedMutex.RLock() + defer fake.mSPUpdatedMutex.RUnlock() + fake.migrateToV2Mutex.RLock() + defer fake.migrateToV2Mutex.RUnlock() + fake.migrateToV24Mutex.RLock() + defer fake.migrateToV24Mutex.RUnlock() + fake.nodeOUUpdatedMutex.RLock() + defer fake.nodeOUUpdatedMutex.RUnlock() + fake.ordererTagUpdatedMutex.RLock() + defer fake.ordererTagUpdatedMutex.RUnlock() + fake.restartNeededMutex.RLock() + defer fake.restartNeededMutex.RUnlock() + fake.specUpdatedMutex.RLock() + defer fake.specUpdatedMutex.RUnlock() + fake.tLSCertUpdatedMutex.RLock() + defer fake.tLSCertUpdatedMutex.RUnlock() + fake.tLScertEnrollMutex.RLock() + defer fake.tLScertEnrollMutex.RUnlock() + fake.tLScertNewKeyReenrollMutex.RLock() + defer fake.tLScertNewKeyReenrollMutex.RUnlock() + fake.tLScertReenrollNeededMutex.RLock() + defer fake.tLScertReenrollNeededMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *Update) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ baseorderer.Update = 
new(Update) diff --git a/pkg/offering/base/orderer/node.go b/pkg/offering/base/orderer/node.go new file mode 100644 index 00000000..a0a54286 --- /dev/null +++ b/pkg/offering/base/orderer/node.go @@ -0,0 +1,1719 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package baseorderer + +import ( + "context" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + "time" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + "github.com/IBM-Blockchain/fabric-operator/pkg/action" + commonapi "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/certificate" + commoninit "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common" + commonconfig "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + initializer "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer" + ordererconfig "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer/config/v1" + v2ordererconfig "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer/config/v2" + v24ordererconfig "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer/config/v24" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/validator" + controllerclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + resourcemanager "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/manager" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common/reconcilechecks" + "github.com/IBM-Blockchain/fabric-operator/pkg/operatorerrors" + "github.com/IBM-Blockchain/fabric-operator/pkg/restart" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + "github.com/IBM-Blockchain/fabric-operator/version" + "github.com/pkg/errors" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + k8sclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/yaml" +) + +const ( + NODE = "node" +) + +type Override interface { + Deployment(v1.Object, *appsv1.Deployment, resources.Action) error + Service(v1.Object, *corev1.Service, resources.Action) error + PVC(v1.Object, *corev1.PersistentVolumeClaim, resources.Action) error + EnvCM(v1.Object, *corev1.ConfigMap, resources.Action, map[string]interface{}) error + OrdererNode(v1.Object, *current.IBPOrderer, resources.Action) error +} + 
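+// Illustrative note (an assumption of typical test usage, not part of this change): the
+// counterfeiter-generated fakes above, such as mocks.Update, are normally driven from unit
+// tests by stubbing return values before exercising the node logic. Assuming test fixtures
+// named `node` (*Node) and `instance` (*current.IBPOrderer), a minimal sketch could be:
+//
+//	update := &mocks.Update{}
+//	update.EcertUpdatedReturns(true)   // force the ecert-updated reconcile path
+//	update.RestartNeededReturns(false) // leave the restart path untouched
+//	_, err := node.Reconcile(instance, update)
+//
+// Here `node` and `instance` are placeholder fixture names, not identifiers defined in this file.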
+//go:generate counterfeiter -o mocks/deployment_manager.go -fake-name DeploymentManager . DeploymentManager + +type DeploymentManager interface { + resources.Manager + CheckForSecretChange(v1.Object, string, func(string, *appsv1.Deployment) bool) error +} + +//go:generate counterfeiter -o mocks/initializeibporderer.go -fake-name InitializeIBPOrderer . InitializeIBPOrderer + +type InitializeIBPOrderer interface { + GenerateSecrets(commoninit.SecretType, *current.IBPOrderer, *commonconfig.Response) error + Create(initializer.OrdererConfig, initializer.IBPOrderer, string) (*initializer.Response, error) + Update(initializer.OrdererConfig, initializer.IBPOrderer) (*initializer.Response, error) + CreateOrUpdateConfigMap(*current.IBPOrderer, initializer.OrdererConfig) error + GetConfigFromConfigMap(instance *current.IBPOrderer) (*corev1.ConfigMap, error) + MissingCrypto(*current.IBPOrderer) bool + Delete(*current.IBPOrderer) error + CheckIfAdminCertsUpdated(*current.IBPOrderer) (bool, error) + UpdateAdminSecret(*current.IBPOrderer) error + GetInitOrderer(instance *current.IBPOrderer, storagePath string) (*initializer.Orderer, error) + GetUpdatedOrderer(instance *current.IBPOrderer) (*initializer.Orderer, error) + UpdateSecrets(prefix commoninit.SecretType, instance *current.IBPOrderer, crypto *commonconfig.Response) error + GenerateSecretsFromResponse(instance *current.IBPOrderer, cryptoResponse *commonconfig.CryptoResponse) error + UpdateSecretsFromResponse(instance *current.IBPOrderer, cryptoResponse *commonconfig.CryptoResponse) error + GetCrypto(instance *current.IBPOrderer) (*commonconfig.CryptoResponse, error) + GetCoreConfigFromFile(instance *current.IBPOrderer, file string) (initializer.OrdererConfig, error) + GetCoreConfigFromBytes(instance *current.IBPOrderer, bytes []byte) (initializer.OrdererConfig, error) +} + +//go:generate counterfeiter -o mocks/update.go -fake-name Update . Update + +type Update interface { + SpecUpdated() bool + ConfigOverridesUpdated() bool + TLSCertUpdated() bool + EcertUpdated() bool + OrdererTagUpdated() bool + CertificateUpdated() bool + RestartNeeded() bool + EcertReenrollNeeded() bool + TLScertReenrollNeeded() bool + EcertNewKeyReenroll() bool + TLScertNewKeyReenroll() bool + DeploymentUpdated() bool + MSPUpdated() bool + EcertEnroll() bool + TLScertEnroll() bool + CertificateCreated() bool + GetCreatedCertType() commoninit.SecretType + CryptoBackupNeeded() bool + MigrateToV2() bool + MigrateToV24() bool + NodeOUUpdated() bool + ImagesUpdated() bool + FabricVersionUpdated() bool +} + +type IBPOrderer interface { + Initialize(instance *current.IBPOrderer, update Update) error + PreReconcileChecks(instance *current.IBPOrderer, update Update) (bool, error) + ReconcileManagers(instance *current.IBPOrderer, update Update, genesisBlock []byte) error + Reconcile(instance *current.IBPOrderer, update Update) (common.Result, error) +} + +//go:generate counterfeiter -o mocks/certificate_manager.go -fake-name CertificateManager . CertificateManager + +type CertificateManager interface { + CheckCertificatesForExpire(instance v1.Object, numSecondsBeforeExpire int64) (current.IBPCRStatusType, string, error) + GetSignCert(string, string) ([]byte, error) + GetDurationToNextRenewal(commoninit.SecretType, v1.Object, int64) (time.Duration, error) + RenewCert(commoninit.SecretType, certificate.Instance, *current.EnrollmentSpec, *commonapi.BCCSP, string, bool, bool) error +} + +//go:generate counterfeiter -o mocks/restart_manager.go -fake-name RestartManager . 
RestartManager + +type RestartManager interface { + ForAdminCertUpdate(instance v1.Object) error + ForCertUpdate(certType commoninit.SecretType, instance v1.Object) error + ForConfigOverride(instance v1.Object) error + ForNodeOU(instance v1.Object) error + TriggerIfNeeded(instance restart.Instance) error + ForRestartAction(instance v1.Object) error +} + +type OrdererConfig interface { + MergeWith(interface{}, bool) error + ToBytes() ([]byte, error) + UsingPKCS11() bool + SetPKCS11Defaults(bool) + GetBCCSPSection() *commonapi.BCCSP + SetDefaultKeyStore() + SetBCCSPLibrary(string) +} + +type Manager struct { + Client controllerclient.Client + Scheme *runtime.Scheme + Config *config.Config +} + +func (m *Manager) GetNode(nodeNumber int, renewCertTimers map[string]*time.Timer, restartManager RestartManager) *Node { + return NewNode(m.Client, m.Scheme, m.Config, fmt.Sprintf("%s%d", NODE, nodeNumber), renewCertTimers, restartManager) +} + +var _ IBPOrderer = &Node{} + +type Node struct { + Client controllerclient.Client + Scheme *runtime.Scheme + Config *config.Config + + DeploymentManager DeploymentManager + ServiceManager resources.Manager + PVCManager resources.Manager + EnvConfigMapManager resources.Manager + RoleManager resources.Manager + RoleBindingManager resources.Manager + ServiceAccountManager resources.Manager + + Override Override + Initializer InitializeIBPOrderer + Name string + + CertificateManager CertificateManager + RenewCertTimers map[string]*time.Timer + + Restart RestartManager +} + +func NewNode(client controllerclient.Client, scheme *runtime.Scheme, config *config.Config, name string, renewCertTimers map[string]*time.Timer, restartManager RestartManager) *Node { + n := &Node{ + Client: client, + Scheme: scheme, + Config: config, + Override: &override.Override{ + Name: name, + Client: client, + Config: config, + }, + Name: name, + RenewCertTimers: renewCertTimers, + Restart: restartManager, + } + n.CreateManagers() + + validator := &validator.Validator{ + Client: client, + } + + n.Initializer = initializer.New(client, scheme, config.OrdererInitConfig, name, validator) + n.CertificateManager = certificate.New(client, scheme) + + return n +} + +func NewNodeWithOverrides(client controllerclient.Client, scheme *runtime.Scheme, config *config.Config, name string, o Override, renewCertTimers map[string]*time.Timer, restartManager RestartManager) *Node { + n := &Node{ + Client: client, + Scheme: scheme, + Config: config, + Override: o, + Name: name, + RenewCertTimers: renewCertTimers, + Restart: restartManager, + } + n.CreateManagers() + + validator := &validator.Validator{ + Client: client, + } + + n.Initializer = initializer.New(client, scheme, config.OrdererInitConfig, name, validator) + n.CertificateManager = certificate.New(client, scheme) + + return n +} + +func (n *Node) CreateManagers() { + override := n.Override + resourceManager := resourcemanager.New(n.Client, n.Scheme) + n.DeploymentManager = resourceManager.CreateDeploymentManager("", override.Deployment, n.GetLabels, n.Config.OrdererInitConfig.DeploymentFile) + n.ServiceManager = resourceManager.CreateServiceManager("", override.Service, n.GetLabels, n.Config.OrdererInitConfig.ServiceFile) + n.PVCManager = resourceManager.CreatePVCManager("", override.PVC, n.GetLabels, n.Config.OrdererInitConfig.PVCFile) + n.EnvConfigMapManager = resourceManager.CreateConfigMapManager("env", override.EnvCM, n.GetLabels, n.Config.OrdererInitConfig.CMFile, nil) + n.RoleManager = resourceManager.CreateRoleManager("", nil, n.GetLabels, 
n.Config.OrdererInitConfig.RoleFile) + n.RoleBindingManager = resourceManager.CreateRoleBindingManager("", nil, n.GetLabels, n.Config.OrdererInitConfig.RoleBindingFile) + n.ServiceAccountManager = resourceManager.CreateServiceAccountManager("", nil, n.GetLabels, n.Config.OrdererInitConfig.ServiceAccountFile) +} + +func (n *Node) Reconcile(instance *current.IBPOrderer, update Update) (common.Result, error) { + log.Info(fmt.Sprintf("Reconciling node instance '%s' ... update: %+v", instance.Name, update)) + var err error + var status *current.CRStatus + + versionSet, err := n.SetVersion(instance) + if err != nil { + return common.Result{}, errors.Wrap(err, fmt.Sprintf("failed updating CR '%s' to version '%s'", instance.Name, version.Operator)) + } + if versionSet { + log.Info("Instance version updated, requeuing request...") + return common.Result{ + Result: reconcile.Result{ + Requeue: true, + }, + }, nil + } + + instanceUpdated, err := n.PreReconcileChecks(instance, update) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed pre reconcile checks") + } + externalEndpointUpdated := n.UpdateExternalEndpoint(instance) + + if instanceUpdated || externalEndpointUpdated { + log.Info(fmt.Sprintf("Updating instance after pre reconcile checks: %t, updating external endpoint: %t", + instanceUpdated, externalEndpointUpdated)) + + err = n.Client.Patch(context.TODO(), instance, nil, controllerclient.PatchOption{ + Resilient: &controllerclient.ResilientPatch{ + Retry: 3, + Into: &current.IBPOrderer{}, + Strategy: k8sclient.MergeFrom, + }, + }) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to update instance") + } + + log.Info("Instance updated, requeuing request...") + return common.Result{ + Result: reconcile.Result{ + Requeue: true, + }, + Status: &current.CRStatus{ + Type: current.Initializing, + Reason: "Setting default values for zone, region, and/or external endpoint", + Message: "Operator has updated spec with defaults as part of initialization", + }, + }, nil + } + + err = n.Initialize(instance, update) + if err != nil { + return common.Result{}, operatorerrors.Wrap(err, operatorerrors.OrdererInitilizationFailed, "failed to initialize orderer node") + } + + err = n.ReconcileManagers(instance, update, nil) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to reconcile managers") + } + + err = n.UpdateConnectionProfile(instance) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to create connection profile") + } + + err = n.CheckStates(instance) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to check and restore state") + } + + // custom product logic can be implemented here + // No-Op atm + status, result, err := n.CustomLogic(instance, update) + + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to run custom offering logic") + } + + if result != nil { + return *result, nil + } + + if update.MSPUpdated() { + err = n.UpdateMSPCertificates(instance) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to update certificates passed in MSP spec") + } + } + + if update.EcertUpdated() { + log.Info("Ecert was updated") + // Request deployment restart for ecert update + err = n.Restart.ForCertUpdate(commoninit.ECERT, instance) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to update restart config") + } + } + + if update.TLSCertUpdated() { + log.Info("TLS cert was updated") + // Request deployment restart for TLS cert update + err =
n.Restart.ForCertUpdate(commoninit.TLS, instance) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to update restart config") + } + } + + if err := n.HandleActions(instance, update); err != nil { + return common.Result{}, errors.Wrap(err, "failed to handle actions") + } + + if err := n.HandleRestart(instance, update); err != nil { + return common.Result{}, err + } + + return common.Result{ + Status: status, + }, nil +} + +// PreReconcileChecks validates the CR request before starting the reconcile flow +func (n *Node) PreReconcileChecks(instance *current.IBPOrderer, update Update) (bool, error) { + var err error + + imagesUpdated, err := reconcilechecks.FabricVersionHelper(instance, n.Config.Operator.Versions, update) + if err != nil { + return false, errors.Wrap(err, "failed during version and image checks") + } + + if instance.Spec.HSMSet() { + err = util.ValidateHSMProxyURL(instance.Spec.HSM.PKCS11Endpoint) + if err != nil { + return false, errors.Wrapf(err, "invalid HSM endpoint for orderer instance '%s'", instance.GetName()) + } + } + + if !instance.Spec.DomainSet() { + return false, fmt.Errorf("domain not set for orderer instance '%s'", instance.GetName()) + } + + if instance.Spec.Action.Enroll.Ecert && instance.Spec.Action.Reenroll.Ecert { + return false, errors.New("both enroll and reenroll action requested for ecert, must only select one") + } + + if instance.Spec.Action.Enroll.TLSCert && instance.Spec.Action.Reenroll.TLSCert { + return false, errors.New("both enroll and reenroll action requested for TLS cert, must only select one") + } + + if instance.Spec.Action.Enroll.Ecert && instance.Spec.Action.Reenroll.EcertNewKey { + return false, errors.New("both enroll and reenroll with new key action requested for ecert, must only select one") + } + + if instance.Spec.Action.Enroll.TLSCert && instance.Spec.Action.Reenroll.TLSCertNewKey { + return false, errors.New("both enroll and reenroll with new key action requested for TLS cert, must only select one") + } + + if instance.Spec.Action.Reenroll.Ecert && instance.Spec.Action.Reenroll.EcertNewKey { + return false, errors.New("both reenroll and reenroll with new key action requested for ecert, must only select one") + } + + if instance.Spec.Action.Reenroll.TLSCert && instance.Spec.Action.Reenroll.TLSCertNewKey { + return false, errors.New("both reenroll and reenroll with new key action requested for TLS cert, must only select one") + } + + zoneUpdated, err := n.SelectZone(instance) + if err != nil { + return false, err + } + + regionUpdated, err := n.SelectRegion(instance) + if err != nil { + return false, err + } + + hsmImageUpdated := n.ReconcileHSMImages(instance) + + var replicasUpdated bool + if instance.Spec.Replicas == nil { + replicas := int32(1) + instance.Spec.Replicas = &replicas + replicasUpdated = true + } + + updated := zoneUpdated || regionUpdated || hsmImageUpdated || replicasUpdated || imagesUpdated + + if updated { + log.Info(fmt.Sprintf("zoneUpdated %t, regionUpdated %t, hsmImageUpdated %t, replicasUpdated %t, imagesUpdated %t", + zoneUpdated, regionUpdated, hsmImageUpdated, replicasUpdated, imagesUpdated)) + } + + return updated, nil +} + +func (n *Node) Initialize(instance *current.IBPOrderer, update Update) error { + var err error + + log.Info(fmt.Sprintf("Checking if initialization needed for node: %s", instance.GetName())) + + // TODO: Add checks to determine if initialization is needed.
+ + // Service account is required by HSM init job + err = n.ReconcileRBAC(instance) + if err != nil { + return errors.Wrap(err, "failed RBAC reconciliation") + } + + if instance.IsHSMEnabled() { + // If HSM config not found, HSM proxy is being used + if instance.UsingHSMProxy() { + err = os.Setenv("PKCS11_PROXY_SOCKET", instance.Spec.HSM.PKCS11Endpoint) + if err != nil { + return err + } + } else { + + hsmConfig, err := commonconfig.ReadHSMConfig(n.Client, instance) + if err != nil { + return errors.New("using non-proxy HSM, but no HSM config defined as config map 'ibp-hsm-config'") + } + + if hsmConfig.Daemon != nil { + log.Info("Using daemon based HSM, creating pvc...") + n.PVCManager.SetCustomName(instance.Spec.CustomNames.PVC.Orderer) + err = n.PVCManager.Reconcile(instance, update.SpecUpdated()) + if err != nil { + return errors.Wrap(err, "failed PVC reconciliation") + } + } + } + } + + initOrderer, err := n.Initializer.GetInitOrderer(instance, n.GetInitStoragePath(instance)) + if err != nil { + return err + } + initOrderer.UsingHSMProxy = instance.UsingHSMProxy() + + ordererConfig := n.Config.OrdererInitConfig.OrdererFile + if version.GetMajorReleaseVersion(instance.Spec.FabricVersion) == version.V2 { + currentVer := version.String(instance.Spec.FabricVersion) + if currentVer.EqualWithoutTag(version.V2_4_1) || currentVer.GreaterThan(version.V2_4_1) { + ordererConfig = n.Config.OrdererInitConfig.OrdererV24File + } else if currentVer.LessThan(version.V2_4_1) { + ordererConfig = n.Config.OrdererInitConfig.OrdererV2File + } + } + + initOrderer.Config, err = n.Initializer.GetCoreConfigFromFile(instance, ordererConfig) + if err != nil { + return err + } + + updated := update.ConfigOverridesUpdated() || update.NodeOUUpdated() + if update.ConfigOverridesUpdated() { + err = n.InitializeUpdateConfigOverride(instance, initOrderer) + if err != nil { + return err + } + // Request deployment restart for config override update + if err := n.Restart.ForConfigOverride(instance); err != nil { + return err + } + } + if update.NodeOUUpdated() { + err = n.InitializeUpdateNodeOU(instance) + if err != nil { + return err + } + // Request deployment restart for node OU update + if err = n.Restart.ForNodeOU(instance); err != nil { + return err + } + } + if !updated { + err = n.InitializeCreate(instance, initOrderer) + if err != nil { + return err + } + } + + updateNeeded, err := n.Initializer.CheckIfAdminCertsUpdated(instance) + if err != nil { + return err + } + + if updateNeeded { + err = n.Initializer.UpdateAdminSecret(instance) + if err != nil { + return err + } + // Request deployment restart for admin cert updates + if err = n.Restart.ForAdminCertUpdate(instance); err != nil { + return err + } + } + + return nil +} + +func (n *Node) InitializeCreate(instance *current.IBPOrderer, initOrderer *initializer.Orderer) error { + // TODO: Should also check for secrets, not just the config map + if n.ConfigExists(instance) { + log.Info(fmt.Sprintf("Config '%s-config' exists, not reinitializing node", instance.GetName())) + return nil + } + + log.Info(fmt.Sprintf("Running initialization for create event on node '%s', since config '%s-config' does not exist", instance.GetName(), instance.GetName())) + configOverride, err := instance.GetConfigOverride() + if err != nil { + return err + } + resp, err := n.Initializer.Create(configOverride.(OrdererConfig), initOrderer, 
n.GetInitStoragePath(instance)) + if err != nil { + return err + } + + if resp != nil { + if resp.Crypto != nil { + if !instance.Spec.NodeOUDisabled() { + if err := resp.Crypto.VerifyCertOU("orderer"); err != nil { + return err + } + } + + err = n.Initializer.GenerateSecretsFromResponse(instance, resp.Crypto) + if err != nil { + return err + } + } + + if resp.Config != nil { + log.Info(fmt.Sprintf("Create config map for '%s'...", instance.GetName())) + if instance.IsHSMEnabled() && !instance.UsingHSMProxy() { + hsmConfig, err := commonconfig.ReadHSMConfig(n.Client, instance) + if err != nil { + return err + } + resp.Config.SetBCCSPLibrary(filepath.Join("/hsm/lib", filepath.Base(hsmConfig.Library.FilePath))) + } + + err = n.Initializer.CreateOrUpdateConfigMap(instance, resp.Config) + if err != nil { + return err + } + } + } + + return nil +} + +func (n *Node) ConfigExists(instance *current.IBPOrderer) bool { + name := fmt.Sprintf("%s-config", instance.GetName()) + namespacedName := types.NamespacedName{ + Name: name, + Namespace: instance.Namespace, + } + + cm := &corev1.ConfigMap{} + err := n.Client.Get(context.TODO(), namespacedName, cm) + if err != nil { + return false + } + + return true +} + +func (n *Node) InitializeUpdateConfigOverride(instance *current.IBPOrderer, initOrderer *initializer.Orderer) error { + log.Info(fmt.Sprintf("Running initialization update config override for node: %s", instance.GetName())) + + if n.Initializer.MissingCrypto(instance) { + log.Info("Missing crypto for node") + // If crypto is missing, we should run the create logic + err := n.InitializeCreate(instance, initOrderer) + if err != nil { + return err + } + + return nil + } + + cm, err := n.Initializer.GetConfigFromConfigMap(instance) + if err != nil { + return err + } + + initOrderer.Config, err = n.Initializer.GetCoreConfigFromBytes(instance, cm.BinaryData["orderer.yaml"]) + if err != nil { + return err + } + + configOverride, err := instance.GetConfigOverride() + if err != nil { + return err + } + + resp, err := n.Initializer.Update(configOverride.(OrdererConfig), initOrderer) + if err != nil { + return err + } + + if resp != nil && resp.Config != nil { + log.Info(fmt.Sprintf("Update config map for '%s'...", instance.GetName())) + err = n.Initializer.CreateOrUpdateConfigMap(instance, resp.Config) + if err != nil { + return err + } + } + + return nil +} + +func (n *Node) InitializeUpdateNodeOU(instance *current.IBPOrderer) error { + log.Info(fmt.Sprintf("Running initialize update node OU enabled: %t for orderer '%s", !instance.Spec.NodeOUDisabled(), instance.GetName())) + + crypto, err := n.Initializer.GetCrypto(instance) + if err != nil { + return err + } + + if !instance.Spec.NodeOUDisabled() { + if err := crypto.VerifyCertOU("orderer"); err != nil { + return err + + } + } else { + // If nodeOUDisabled, admin certs are required + if crypto.Enrollment.AdminCerts == nil { + return errors.New("node OU disabled, admin certs are required but missing") + } + } + + // Update config.yaml in config map + err = n.Initializer.CreateOrUpdateConfigMap(instance, nil) + if err != nil { + return err + } + + return nil +} + +func (n *Node) ReconcileManagers(instance *current.IBPOrderer, updated Update, genesisBlock []byte) error { + var err error + + update := updated.SpecUpdated() + + n.PVCManager.SetCustomName(instance.Spec.CustomNames.PVC.Orderer) + err = n.PVCManager.Reconcile(instance, update) + if err != nil { + return errors.Wrapf(err, "failed PVC reconciliation") + } + + err = 
n.ServiceManager.Reconcile(instance, update) + if err != nil { + return errors.Wrap(err, "failed Service reconciliation") + } + + err = n.ReconcileRBAC(instance) + if err != nil { + return errors.Wrap(err, "failed RBAC reconciliation") + } + + err = n.EnvConfigMapManager.Reconcile(instance, update) + if err != nil { + return errors.Wrap(err, "failed Env ConfigMap reconciliation") + } + + if instance.Spec.IsUsingChannelLess() { + log.Info("Node is in channel less mode - ending reconcile") + } else if !instance.Spec.IsPrecreateOrderer() { + log.Info("Node is not precreate - reconciling genesis secret") + err = n.ReconcileGenesisSecret(instance) + if err != nil { + return errors.Wrap(err, "failed Genesis Secret reconciliation") + } + } + + err = n.DeploymentManager.Reconcile(instance, updated.DeploymentUpdated()) + if err != nil { + return errors.Wrap(err, "failed Deployment reconciliation") + } + + return nil +} + +func (n *Node) CheckStates(instance *current.IBPOrderer) error { + // Don't need to check state if the state is being updated via CR. State needs + // to be checked if operator detects changes to a resources that was not triggered + // via CR. + if n.DeploymentManager.Exists(instance) { + err := n.DeploymentManager.CheckState(instance) + if err != nil { + log.Error(err, "unexpected state") + err = n.DeploymentManager.RestoreState(instance) + if err != nil { + return err + } + } + } + + return nil +} + +func (n *Node) SetVersion(instance *current.IBPOrderer) (bool, error) { + if instance.Status.Version == "" || !version.String(instance.Status.Version).Equal(version.Operator) { + log.Info("Version of Operator: ", "version", version.Operator) + log.Info(fmt.Sprintf("Version of CR '%s': %s", instance.GetName(), instance.Status.Version)) + log.Info(fmt.Sprintf("Setting '%s' to version '%s'", instance.Name, version.Operator)) + + instance.Status.Version = version.Operator + err := n.Client.PatchStatus(context.TODO(), instance, nil, controllerclient.PatchOption{ + Resilient: &controllerclient.ResilientPatch{ + Retry: 3, + Into: ¤t.IBPOrderer{}, + Strategy: k8sclient.MergeFrom, + }, + }) + if err != nil { + return false, err + } + return true, nil + } + return false, nil +} + +func (n *Node) GetLabels(instance v1.Object) map[string]string { + parts := strings.Split(instance.GetName(), "node") + label := os.Getenv("OPERATOR_LABEL_PREFIX") + if label == "" { + label = "fabric" + } + + if len(parts) > 1 { + ordererclustername := strings.Join(parts[:len(parts)-1], "node") + orderingnode := "node" + parts[len(parts)-1] + return map[string]string{ + "app": instance.GetName(), + "creator": label, + "orderingservice": ordererclustername, + "orderingnode": orderingnode, + "parent": ordererclustername, + "app.kubernetes.io/name": label, + "app.kubernetes.io/instance": label + "orderer", + "app.kubernetes.io/managed-by": label + "-operator", + } + } + + return map[string]string{ + "app": instance.GetName(), + "creator": label, + "orderingservice": fmt.Sprintf("%s", instance.GetName()), + "app.kubernetes.io/name": label, + "app.kubernetes.io/instance": label + "orderer", + "app.kubernetes.io/managed-by": label + "-operator", + } +} + +func (n *Node) Delete(instance *current.IBPOrderer) error { + log.Info(fmt.Sprintf("Deleting node '%s'", n.Name)) + err := n.ServiceManager.Delete(instance) + if err != nil { + return errors.Wrapf(err, "failed to delete service '%s'", n.ServiceManager.GetName(instance)) + } + + err = n.PVCManager.Delete(instance) + if err != nil { + return errors.Wrapf(err, "failed to 
delete pvc '%s'", n.ServiceManager.GetName(instance)) + } + + err = n.EnvConfigMapManager.Delete(instance) + if err != nil { + return errors.Wrapf(err, "failed to delete config map '%s'", n.ServiceManager.GetName(instance)) + } + + err = n.Initializer.Delete(instance) + if err != nil { + return errors.Wrapf(err, "failed to delete secrets") + } + + // Important: This must always be the last resource to be deleted + err = n.DeploymentManager.Delete(instance) + if err != nil { + return errors.Wrapf(err, "failed to delete deployment '%s'", n.DeploymentManager.GetName(instance)) + } + + return nil +} + +func (n *Node) ReconcileGenesisSecret(instance *current.IBPOrderer) error { + namespacedName := types.NamespacedName{ + Name: instance.Name + "-genesis", + Namespace: instance.Namespace, + } + + secret := &corev1.Secret{} + err := n.Client.Get(context.TODO(), namespacedName, secret) + if err != nil { + if k8serrors.IsNotFound(err) { + // Request object not found, could have been deleted after reconcile request. + // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. + // Return and don't requeue + return n.CreateGenesisSecret(instance) + } + // Error reading the object - requeue the request. + return err + } + return nil +} + +func (n *Node) CreateGenesisSecret(instance *current.IBPOrderer) error { + data := map[string][]byte{} + + genesisBlock, err := util.Base64ToBytes(instance.Spec.GenesisBlock) + if err != nil { + return errors.Wrap(err, "failed to decode genesis block") + } + + data["orderer.block"] = genesisBlock + s := &corev1.Secret{ + Data: data, + } + s.Name = instance.Name + "-genesis" + s.Namespace = instance.Namespace + s.Labels = n.GetLabels(instance) + + err = n.Client.CreateOrUpdate(context.TODO(), s, controllerclient.CreateOrUpdateOption{ + Owner: instance, + Scheme: n.Scheme, + }) + if err != nil { + return errors.Wrap(err, "failed to create genesis secret") + } + + return nil +} + +func (n *Node) ReconcileRBAC(instance *current.IBPOrderer) error { + var err error + + err = n.RoleManager.Reconcile(instance, false) + if err != nil { + return err + } + + err = n.RoleBindingManager.Reconcile(instance, false) + if err != nil { + return err + } + + err = n.ServiceAccountManager.Reconcile(instance, false) + if err != nil { + return err + } + + return nil +} + +func (n *Node) SelectZone(instance *current.IBPOrderer) (bool, error) { + if instance.Spec.Zone == "select" { + log.Info("Selecting zone...") + zone := util.GetZone(n.Client) + log.Info(fmt.Sprintf("Zone set to: '%s'", zone)) + instance.Spec.Zone = zone + return true, nil + } + if instance.Spec.Zone != "" { + err := util.ValidateZone(n.Client, instance.Spec.Zone) + if err != nil { + return false, err + } + } + return false, nil +} + +func (n *Node) SelectRegion(instance *current.IBPOrderer) (bool, error) { + if instance.Spec.Region == "select" { + log.Info("Selecting region...") + region := util.GetRegion(n.Client) + log.Info(fmt.Sprintf("Region set to: '%s'", region)) + instance.Spec.Region = region + return true, nil + } + if instance.Spec.Region != "" { + err := util.ValidateRegion(n.Client, instance.Spec.Region) + if err != nil { + return false, err + } + } + return false, nil +} + +func (n *Node) UpdateExternalEndpoint(instance *current.IBPOrderer) bool { + if instance.Spec.ExternalAddress == "" { + instance.Spec.ExternalAddress = instance.Namespace + "-" + instance.Name + "-orderer" + "." 
+ instance.Spec.Domain + ":443" + return true + } + return false +} + +func (n *Node) UpdateConnectionProfile(instance *current.IBPOrderer) error { + var err error + + endpoints := n.GetEndpoints(instance) + + tlscert, err := common.GetTLSSignCertEncoded(n.Client, instance) + if err != nil { + return err + } + + tlscacerts, err := common.GetTLSCACertEncoded(n.Client, instance) + if err != nil { + return err + } + + tlsintercerts, err := common.GetTLSIntercertEncoded(n.Client, instance) + if err != nil { + return err + } + + ecert, err := common.GetEcertSignCertEncoded(n.Client, instance) + if err != nil { + return err + } + + cacert, err := common.GetEcertCACertEncoded(n.Client, instance) + if err != nil { + return err + } + + admincerts, err := common.GetEcertAdmincertEncoded(n.Client, instance) + if err != nil { + return err + } + + if len(tlsintercerts) > 0 { + tlscacerts = tlsintercerts + } + + err = n.UpdateConnectionProfileConfigmap(instance, *endpoints, tlscert, tlscacerts, ecert, cacert, admincerts) + if err != nil { + return err + } + + return nil +} + +func (n *Node) UpdateConnectionProfileConfigmap(instance *current.IBPOrderer, endpoints current.OrdererEndpoints, tlscert string, tlscacerts []string, ecert string, cacert []string, admincerts []string) error { + + // TODO add ecert.intermediatecerts and ecert.admincerts + // TODO add tls.cacerts + // TODO get the whole PeerConnectionProfile object from caller?? + name := instance.Name + "-connection-profile" + connectionProfile := ¤t.OrdererConnectionProfile{ + Endpoints: endpoints, + TLS: ¤t.MSP{ + SignCerts: tlscert, + CACerts: tlscacerts, + }, + Component: ¤t.MSP{ + SignCerts: ecert, + CACerts: cacert, + AdminCerts: admincerts, + }, + } + + bytes, err := json.Marshal(connectionProfile) + if err != nil { + return errors.Wrap(err, "failed to marshal connection profile") + } + cm := &corev1.ConfigMap{ + BinaryData: map[string][]byte{"profile.json": bytes}, + } + cm.Name = name + cm.Namespace = instance.Namespace + cm.Labels = n.GetLabels(instance) + + nn := types.NamespacedName{ + Name: name, + Namespace: instance.GetNamespace(), + } + + err = n.Client.Get(context.TODO(), nn, &corev1.ConfigMap{}) + if err == nil { + log.Info(fmt.Sprintf("Update connection profile configmap '%s' for %s", nn.Name, instance.Name)) + err = n.Client.Update(context.TODO(), cm, controllerclient.UpdateOption{Owner: instance, Scheme: n.Scheme}) + if err != nil { + return errors.Wrap(err, "failed to update connection profile configmap") + } + } else { + log.Info(fmt.Sprintf("Create connection profile configmap '%s' for %s", nn.Name, instance.Name)) + err = n.Client.Create(context.TODO(), cm, controllerclient.CreateOption{Owner: instance, Scheme: n.Scheme}) + if err != nil { + return errors.Wrap(err, "failed to create connection profile configmap") + } + } + + return nil +} + +func (n *Node) GetEndpoints(instance *current.IBPOrderer) *current.OrdererEndpoints { + endpoints := ¤t.OrdererEndpoints{ + API: "grpcs://" + instance.Namespace + "-" + instance.Name + "-orderer." + instance.Spec.Domain + ":443", + Operations: "https://" + instance.Namespace + "-" + instance.Name + "-operations." + instance.Spec.Domain + ":443", + Grpcweb: "https://" + instance.Namespace + "-" + instance.Name + "-grpcweb." 
+ instance.Spec.Domain + ":443", + } + currentVer := version.String(instance.Spec.FabricVersion) + if currentVer.EqualWithoutTag(version.V2_4_1) || currentVer.GreaterThan(version.V2_4_1) { + endpoints.Admin = "https://" + instance.Namespace + "-" + instance.Name + "-admin." + instance.Spec.Domain + ":443" + } + return endpoints +} + +func (n *Node) UpdateParentStatus(instance *current.IBPOrderer) error { + parentName := instance.Labels["parent"] + + nn := types.NamespacedName{ + Name: parentName, + Namespace: instance.GetNamespace(), + } + + log.Info(fmt.Sprintf("Node '%s' is setting parent '%s' status", instance.GetName(), parentName)) + + parentInstance := ¤t.IBPOrderer{} + err := n.Client.Get(context.TODO(), nn, parentInstance) + if err != nil { + if k8serrors.IsNotFound(err) { + log.Info(fmt.Sprintf("Parent '%s' for node '%s' not found, skipping setting parent status", parentName, instance.GetName())) + return nil + } + return err + } + + // If parent is deployed and child was not updated to warning state, no longer update the parent + if parentInstance.Status.Type == current.Deployed && instance.Status.Type != current.Warning { + log.Info(fmt.Sprintf("Parent '%s' is in 'Deployed' state, can't update status", parentName)) + return nil + } + + labelSelector, err := labels.Parse(fmt.Sprintf("parent=%s", parentName)) + if err != nil { + return errors.Wrap(err, "failed to parse selector for parent name") + } + + listOptions := &client.ListOptions{ + LabelSelector: labelSelector, + Namespace: instance.GetNamespace(), + } + + ordererList := ¤t.IBPOrdererList{} + err = n.Client.List(context.TODO(), ordererList, listOptions) + if err != nil { + return err + } + + clustersize := parentInstance.Spec.ClusterSize + + var returnStatus current.IBPCRStatusType + reason := "No reason" + + log.Info(fmt.Sprintf("Found %d nodes, original cluster size %d", len(ordererList.Items), clustersize)) + + updateStatus := false + errorstateNodes := []string{} + deployingstateNodes := []string{} + precreatedNodes := []string{} + deployedNodes := []string{} + warningNodes := []string{} + + for _, node := range ordererList.Items { + if node.Status.Type == current.Error { + log.Info(fmt.Sprintf("Node %s is in Error state", node.GetName())) + errorstateNodes = append(errorstateNodes, node.GetName()) + } else if node.Status.Type == current.Deploying { + log.Info(fmt.Sprintf("Node %s is in Deploying state", node.GetName())) + deployingstateNodes = append(deployingstateNodes, node.GetName()) + } else if node.Status.Type == current.Precreated { + log.Info(fmt.Sprintf("Node %s is in Precreating state", node.GetName())) + precreatedNodes = append(precreatedNodes, node.GetName()) + } else if node.Status.Type == current.Warning { + log.Info(fmt.Sprintf("Node %s is in Warning state", node.GetName())) + warningNodes = append(warningNodes, node.GetName()) + } else if node.Status.Type == current.Deployed { + log.Info(fmt.Sprintf("Node %s is in Deployed state", node.GetName())) + deployedNodes = append(deployedNodes, node.GetName()) + } + } + + if len(deployingstateNodes) != 0 { + log.Info("Nodes are in deploying state currently, not updating parent status") + updateStatus = false + } else if len(errorstateNodes) != 0 { + updateStatus = true + reason = "The orderer nodes " + strings.Join(errorstateNodes[:], ",") + " are in Error state" + returnStatus = current.Error + } else if len(precreatedNodes) != 0 { + updateStatus = true + reason = "The orderer nodes " + strings.Join(precreatedNodes[:], ",") + " are in Precreated state" + 
returnStatus = current.Precreated + } else if len(warningNodes) != 0 { + updateStatus = true + reason = "The orderer nodes " + strings.Join(warningNodes[:], ",") + " are in Warning state" + returnStatus = current.Warning + } else if len(deployedNodes) != 0 { + updateStatus = true + returnStatus = current.Deployed + reason = "All nodes are deployed" + } + + if updateStatus { + parentInstance.Status.Type = returnStatus + parentInstance.Status.Status = current.True + parentInstance.Status.Reason = reason + parentInstance.Status.LastHeartbeatTime = time.Now().String() + + log.Info(fmt.Sprintf("Setting parent status to: %+v", parentInstance.Status)) + err = n.Client.UpdateStatus(context.TODO(), parentInstance) + if err != nil { + return err + } + } + + return nil +} + +func (n *Node) GetInitStoragePath(instance *current.IBPOrderer) string { + if n.Config != nil && n.Config.OrdererInitConfig != nil && n.Config.OrdererInitConfig.StoragePath != "" { + return filepath.Join(n.Config.OrdererInitConfig.StoragePath, instance.GetName()) + } + + return filepath.Join("/", "ordererinit", instance.GetName()) +} + +func (n *Node) GetBCCSPSectionForInstance(instance *current.IBPOrderer) (*commonapi.BCCSP, error) { + var bccsp *commonapi.BCCSP + if instance.IsHSMEnabled() { + co, err := instance.GetConfigOverride() + if err != nil { + return nil, errors.Wrap(err, "failed to get configoverride") + } + + configOverride := co.(OrdererConfig) + configOverride.SetPKCS11Defaults(instance.UsingHSMProxy()) + bccsp = configOverride.GetBCCSPSection() + } + + return bccsp, nil +} + +func (n *Node) ReconcileFabricOrdererMigration(instance *current.IBPOrderer) error { + ordererConfig, err := n.FabricOrdererMigration(instance) + if err != nil { + return errors.Wrap(err, "failed to migrate orderer between fabric versions") + } + + if ordererConfig != nil { + log.Info("Orderer config updated during fabric orderer migration, updating config map...") + if err := n.Initializer.CreateOrUpdateConfigMap(instance, ordererConfig); err != nil { + return errors.Wrapf(err, "failed to create/update '%s' orderer's config map", instance.GetName()) + } + } + + return nil +} + +// Moving to fabric version above 1.4.6 require that the `msp/keystore` value be removed +// from BCCSP section if configured to use PKCS11 (HSM). 
NOTE: This does not support +// migration across major release, will not cover migration orderer from 1.4.x to 2.x +func (n *Node) FabricOrdererMigration(instance *current.IBPOrderer) (*ordererconfig.Orderer, error) { + if !instance.IsHSMEnabled() { + return nil, nil + } + + ordererTag := instance.Spec.Images.OrdererTag + if !strings.Contains(ordererTag, "sha") { + tag := strings.Split(ordererTag, "-")[0] + + ordererVersion := version.String(tag) + if !ordererVersion.GreaterThan(version.V1_4_6) { + return nil, nil + } + + log.Info(fmt.Sprintf("Orderer moving to fabric version %s", ordererVersion)) + } else { + if instance.Spec.FabricVersion == version.V2 { + return nil, nil + } + log.Info(fmt.Sprintf("Orderer moving to digest %s", ordererTag)) + } + + // Read orderer config map and remove keystore value from BCCSP section + cm, err := n.Initializer.GetConfigFromConfigMap(instance) + if err != nil { + return nil, errors.Wrapf(err, "failed to get '%s' orderer's config map", instance.GetName()) + } + + ordererConfig := &ordererconfig.Orderer{} + if err := yaml.Unmarshal(cm.BinaryData["orderer.yaml"], ordererConfig); err != nil { + return nil, errors.Wrap(err, "invalid orderer config") + } + + // If already nil, don't need to proceed further as config updates are not required + if ordererConfig.General.BCCSP.PKCS11.FileKeyStore == nil { + return nil, nil + } + + ordererConfig.General.BCCSP.PKCS11.FileKeyStore = nil + + return ordererConfig, nil +} + +func (n *Node) UpdateMSPCertificates(instance *current.IBPOrderer) error { + log.Info("Updating certificates passed in MSP spec") + updatedOrderer, err := n.Initializer.GetUpdatedOrderer(instance) + if err != nil { + return err + } + + crypto, err := updatedOrderer.GenerateCrypto() + if err != nil { + return err + } + + if crypto != nil { + err = n.Initializer.UpdateSecrets("ecert", instance, crypto.Enrollment) + if err != nil { + return errors.Wrap(err, "failed to update ecert secrets") + } + + err = n.Initializer.UpdateSecrets("tls", instance, crypto.TLS) + if err != nil { + return errors.Wrap(err, "failed to update tls secrets") + } + + err = n.Initializer.UpdateSecrets("clientauth", instance, crypto.ClientAuth) + if err != nil { + return errors.Wrap(err, "failed to update client auth secrets") + } + } + + return nil +} + +func (n *Node) RenewCert(certType commoninit.SecretType, obj runtime.Object, newKey bool) error { + instance := obj.(*current.IBPOrderer) + if instance.Spec.Secret == nil { + return errors.New(fmt.Sprintf("missing secret spec for instance '%s'", instance.GetName())) + } + + if instance.Spec.Secret.Enrollment != nil { + log.Info(fmt.Sprintf("Renewing %s certificate for instance '%s'", string(certType), instance.Name)) + + hsmEnabled := instance.IsHSMEnabled() + spec := instance.Spec.Secret.Enrollment + storagePath := n.GetInitStoragePath(instance) + bccsp, err := n.GetBCCSPSectionForInstance(instance) + if err != nil { + return err + } + + err = n.CertificateManager.RenewCert(certType, instance, spec, bccsp, storagePath, hsmEnabled, newKey) + if err != nil { + return err + } + } else { + return errors.New("cannot auto-renew certificate created by MSP, force renewal required") + } + + return nil +} + +func (n *Node) EnrollForEcert(instance *current.IBPOrderer) error { + log.Info(fmt.Sprintf("Ecert enroll triggered via action parameter for '%s'", instance.GetName())) + + secret := instance.Spec.Secret + if secret == nil || secret.Enrollment == nil || secret.Enrollment.Component == nil { + return errors.New("unable to enroll, no 
ecert enrollment information provided") + } + ecertSpec := secret.Enrollment.Component + + storagePath := filepath.Join(n.GetInitStoragePath(instance), "ecert") + crypto, err := action.Enroll(instance, ecertSpec, storagePath, n.Client, n.Scheme, true, n.Config.Operator.Orderer.Timeouts.EnrollJob) + if err != nil { + return errors.Wrap(err, "failed to enroll for ecert") + } + + err = n.Initializer.GenerateSecrets("ecert", instance, crypto) + if err != nil { + return errors.Wrap(err, "failed to generate ecert secrets") + } + + return nil +} + +func (n *Node) EnrollForTLSCert(instance *current.IBPOrderer) error { + log.Info(fmt.Sprintf("TLS cert enroll triggered via action parameter for '%s'", instance.GetName())) + + secret := instance.Spec.Secret + if secret == nil || secret.Enrollment == nil || secret.Enrollment.TLS == nil { + return errors.New("unable to enroll, no TLS enrollment information provided") + } + tlscertSpec := secret.Enrollment.TLS + + storagePath := filepath.Join(n.GetInitStoragePath(instance), "tls") + crypto, err := action.Enroll(instance, tlscertSpec, storagePath, n.Client, n.Scheme, false, n.Config.Operator.Orderer.Timeouts.EnrollJob) + if err != nil { + return errors.Wrap(err, "failed to enroll for TLS cert") + } + + err = n.Initializer.GenerateSecrets("tls", instance, crypto) + if err != nil { + return errors.Wrap(err, "failed to generate TLS secrets") + } + + return nil +} + +func (n *Node) FabricOrdererMigrationV2_0(instance *current.IBPOrderer) error { + log.Info(fmt.Sprintf("Orderer instance '%s' migrating to v2", instance.GetName())) + + initOrderer, err := n.Initializer.GetInitOrderer(instance, n.GetInitStoragePath(instance)) + if err != nil { + return err + } + initOrderer.UsingHSMProxy = instance.UsingHSMProxy() + + ordererConfig := n.Config.OrdererInitConfig.OrdererFile + if version.GetMajorReleaseVersion(instance.Spec.FabricVersion) == version.V2 { + currentVer := version.String(instance.Spec.FabricVersion) + if currentVer.EqualWithoutTag(version.V2_4_1) || currentVer.GreaterThan(version.V2_4_1) { + ordererConfig = n.Config.OrdererInitConfig.OrdererV24File + } else { + ordererConfig = n.Config.OrdererInitConfig.OrdererV2File + } + } + + switch version.GetMajorReleaseVersion(instance.Spec.FabricVersion) { + case version.V2: + currentVer := version.String(instance.Spec.FabricVersion) + if currentVer.EqualWithoutTag(version.V2_4_1) || currentVer.GreaterThan(version.V2_4_1) { + log.Info("v2.4.x Fabric Orderer requested") + v24config, err := v24ordererconfig.ReadOrdererFile(ordererConfig) + if err != nil { + return errors.Wrap(err, "failed to read v2.4.x default config file") + } + initOrderer.Config = v24config + } else if currentVer.LessThan(version.V2_4_1) { + log.Info("v2.2.x Fabric Orderer requested") + v2config, err := v2ordererconfig.ReadOrdererFile(ordererConfig) + if err != nil { + return errors.Wrap(err, "failed to read v2.2.x default config file") + } + initOrderer.Config = v2config + } + case version.V1: + fallthrough + default: + // Default to v1.4 to avoid breaking backward compatibility: when coming from a + // previous version of the operator, the 'FabricVersion' field would not be set and + // would otherwise result in an error. + // TODO: Determine whether to throw an error or handle setting + // FabricVersion as part of migration logic. 
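// Editorial sketch (hypothetical helper, not part of the original change): the version
// gate used here and in Initialize could be captured in one place, using only
// identifiers already present in this file:
//
//	func (n *Node) ordererConfigFileFor(fabricVersion string) string {
//		v := version.String(fabricVersion)
//		switch {
//		case v.EqualWithoutTag(version.V2_4_1) || v.GreaterThan(version.V2_4_1):
//			return n.Config.OrdererInitConfig.OrdererV24File
//		case version.GetMajorReleaseVersion(fabricVersion) == version.V2:
//			return n.Config.OrdererInitConfig.OrdererV2File
//		default:
//			return n.Config.OrdererInitConfig.OrdererFile
//		}
//	}
//
// This is equivalent for the 1.4.x / 2.2.x / 2.4.x versions handled in this file.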
+ oconfig, err := ordererconfig.ReadOrdererFile(ordererConfig) + if err != nil { + return errors.Wrap(err, "failed to read v1.4 default config file") + } + initOrderer.Config = oconfig + } + + configOverride, err := instance.GetConfigOverride() + if err != nil { + return err + } + + err = initOrderer.OverrideConfig(configOverride.(OrdererConfig)) + if err != nil { + return err + } + + if instance.IsHSMEnabled() && !instance.UsingHSMProxy() { + log.Info(fmt.Sprintf("During orderer '%s' migration, detected using HSM sidecar, setting library path", instance.GetName())) + hsmConfig, err := commonconfig.ReadHSMConfig(n.Client, instance) + if err != nil { + return err + } + initOrderer.Config.SetBCCSPLibrary(filepath.Join("/hsm/lib", filepath.Base(hsmConfig.Library.FilePath))) + } + + err = n.Initializer.CreateOrUpdateConfigMap(instance, initOrderer.GetConfig()) + if err != nil { + return err + } + + return nil +} + +func (n *Node) FabricOrdererMigrationV2_4(instance *current.IBPOrderer) error { + log.Info(fmt.Sprintf("Orderer instance '%s' migrating to v2.4.x", instance.GetName())) + + initOrderer, err := n.Initializer.GetInitOrderer(instance, n.GetInitStoragePath(instance)) + if err != nil { + return err + } + + ordererConfig, err := v24ordererconfig.ReadOrdererFile(n.Config.OrdererInitConfig.OrdererV24File) + if err != nil { + return errors.Wrap(err, "failed to read v2.4.x default config file") + } + + // removed the field from the struct + // ordererConfig.FileLedger.Prefix = "" + + name := fmt.Sprintf("%s-env", instance.GetName()) + namespacedName := types.NamespacedName{ + Name: name, + Namespace: instance.Namespace, + } + + cm := &corev1.ConfigMap{} + err = n.Client.Get(context.TODO(), namespacedName, cm) + if err != nil { + return errors.Wrap(err, "failed to get env configmap") + } + + // Add configs for 2.4.x + trueVal := true + ordererConfig.Admin.TLs.Enabled = &trueVal + ordererConfig.Admin.TLs.ClientAuthRequired = &trueVal + + intermediateExists := util.IntermediateSecretExists(n.Client, instance.Namespace, fmt.Sprintf("ecert-%s-intercerts", instance.Name)) && + util.IntermediateSecretExists(n.Client, instance.Namespace, fmt.Sprintf("tls-%s-intercerts", instance.Name)) + intercertPath := "/certs/msp/tlsintermediatecerts/intercert-0.pem" + currentVer := version.String(instance.Spec.FabricVersion) + if currentVer.EqualWithoutTag(version.V2_4_1) || currentVer.GreaterThan(version.V2_4_1) { + // Enable Channel participation for 2.4.x orderers + cm.Data["ORDERER_CHANNELPARTICIPATION_ENABLED"] = "true" + + cm.Data["ORDERER_ADMIN_TLS_ENABLED"] = "true" + cm.Data["ORDERER_ADMIN_TLS_CERTIFICATE"] = "/certs/tls/signcerts/cert.pem" + cm.Data["ORDERER_ADMIN_TLS_PRIVATEKEY"] = "/certs/tls/keystore/key.pem" + cm.Data["ORDERER_ADMIN_TLS_CLIENTAUTHREQUIRED"] = "true" + if intermediateExists { + // override intermediate cert paths for root and clientroot cas + cm.Data["ORDERER_ADMIN_TLS_ROOTCAS"] = intercertPath + cm.Data["ORDERER_ADMIN_TLS_CLIENTROOTCAS"] = intercertPath + } else { + cm.Data["ORDERER_ADMIN_TLS_ROOTCAS"] = "/certs/msp/tlscacerts/cacert-0.pem" + cm.Data["ORDERER_ADMIN_TLS_CLIENTROOTCAS"] = "/certs/msp/tlscacerts/cacert-0.pem" + } + } + + err = n.Client.Update(context.TODO(), cm, controllerclient.UpdateOption{Owner: instance, Scheme: n.Scheme}) + if err != nil { + return errors.Wrap(err, "failed to update env configmap") + } + + initOrderer.Config = ordererConfig + configOverride, err := instance.GetConfigOverride() + if err != nil { + return err + } + + err = 
initOrderer.OverrideConfig(configOverride.(OrdererConfig)) + if err != nil { + return err + } + + if instance.IsHSMEnabled() && !instance.UsingHSMProxy() { + log.Info(fmt.Sprintf("During orderer '%s' migration, detected using HSM sidecar, setting library path", instance.GetName())) + hsmConfig, err := commonconfig.ReadHSMConfig(n.Client, instance) + if err != nil { + return err + } + initOrderer.Config.SetBCCSPLibrary(filepath.Join("/hsm/lib", filepath.Base(hsmConfig.Library.FilePath))) + } + + err = n.Initializer.CreateOrUpdateConfigMap(instance, initOrderer.GetConfig()) + if err != nil { + return err + } + return nil +} + +func (n *Node) ReconcileHSMImages(instance *current.IBPOrderer) bool { + hsmConfig, err := commonconfig.ReadHSMConfig(n.Client, instance) + if err != nil { + return false + } + + if hsmConfig.Library.AutoUpdateDisabled { + return false + } + + updated := false + if hsmConfig.Library.Image != "" { + hsm := strings.Split(hsmConfig.Library.Image, ":") + image := hsm[0] + tag := hsm[1] + + if instance.Spec.Images.HSMImage != image { + instance.Spec.Images.HSMImage = image + updated = true + } + + if instance.Spec.Images.HSMTag != tag { + instance.Spec.Images.HSMTag = tag + updated = true + } + } + + return updated +} + +func (n *Node) HandleActions(instance *current.IBPOrderer, update Update) error { + orig := instance.DeepCopy() + + if update.EcertReenrollNeeded() { + if err := n.ReenrollEcert(instance); err != nil { + log.Error(err, "Resetting action flag on failure") + instance.ResetEcertReenroll() + return err + } + instance.ResetEcertReenroll() + } + + if update.TLScertReenrollNeeded() { + if err := n.ReenrollTLSCert(instance); err != nil { + log.Error(err, "Resetting action flag on failure") + instance.ResetTLSReenroll() + return err + } + instance.ResetTLSReenroll() + } + + if update.EcertNewKeyReenroll() { + if err := n.ReenrollEcertNewKey(instance); err != nil { + log.Error(err, "Resetting action flag on failure") + instance.ResetEcertReenroll() + return err + } + instance.ResetEcertReenroll() + } + + if update.TLScertNewKeyReenroll() { + if err := n.ReenrollTLSCertNewKey(instance); err != nil { + log.Error(err, "Resetting action flag on failure") + instance.ResetTLSReenroll() + return err + } + instance.ResetTLSReenroll() + } + + if update.EcertEnroll() { + if err := n.EnrollForEcert(instance); err != nil { + log.Error(err, "Resetting action flag on failure") + instance.ResetEcertEnroll() + return err + } + instance.ResetEcertEnroll() + } + + if update.TLScertEnroll() { + if err := n.EnrollForTLSCert(instance); err != nil { + log.Error(err, "Resetting action flag on failure") + instance.ResetTLSEnroll() + return err + } + instance.ResetTLSEnroll() + } + + // This should be the last action checked + if update.RestartNeeded() { + if err := n.RestartAction(instance); err != nil { + log.Error(err, "Resetting action flag on failure") + instance.ResetRestart() + return err + } + instance.ResetRestart() + } + + if err := n.Client.Patch(context.TODO(), instance, k8sclient.MergeFrom(orig)); err != nil { + return errors.Wrap(err, "failed to reset action flags") + } + + return nil +} + +func (n *Node) ReenrollEcert(instance *current.IBPOrderer) error { + log.Info("Ecert reenroll triggered via action parameter") + if err := n.reenrollCert(instance, commoninit.ECERT, false); err != nil { + return errors.Wrap(err, "ecert reenroll reusing existing private key action failed") + } + return nil +} + +func (n *Node) ReenrollEcertNewKey(instance *current.IBPOrderer) error { + 
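// Editorial note: ReenrollEcert, ReenrollEcertNewKey, ReenrollTLSCert, and
// ReenrollTLSCertNewKey differ only in the certificate type (ECERT vs TLS) and the
// newKey flag; all four delegate to reenrollCert, which calls action.Reenroll.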
log.Info("Ecert with new key reenroll triggered via action parameter") + if err := n.reenrollCert(instance, commoninit.ECERT, true); err != nil { + return errors.Wrap(err, "ecert reenroll with new key action failed") + } + return nil +} + +func (n *Node) ReenrollTLSCert(instance *current.IBPOrderer) error { + log.Info("TLS reenroll triggered via action parameter") + if err := n.reenrollCert(instance, commoninit.TLS, false); err != nil { + return errors.Wrap(err, "tls reenroll reusing existing private key action failed") + } + return nil +} + +func (n *Node) ReenrollTLSCertNewKey(instance *current.IBPOrderer) error { + log.Info("TLS with new key reenroll triggered via action parameter") + if err := n.reenrollCert(instance, commoninit.TLS, true); err != nil { + return errors.Wrap(err, "tls reenroll with new key action failed") + } + return nil +} + +func (n *Node) reenrollCert(instance *current.IBPOrderer, certType commoninit.SecretType, newKey bool) error { + return action.Reenroll(n, n.Client, certType, instance, newKey) +} + +func (n *Node) RestartAction(instance *current.IBPOrderer) error { + log.Info("Restart triggered via action parameter") + if err := n.Restart.ForRestartAction(instance); err != nil { + return errors.Wrap(err, "failed to restart orderer node pods") + } + return nil +} + +func (n *Node) HandleRestart(instance *current.IBPOrderer, update Update) error { + // If restart is disabled for components, can return immediately + if n.Config.Operator.Restart.Disable.Components { + return nil + } + + err := n.Restart.TriggerIfNeeded(instance) + if err != nil { + return errors.Wrap(err, "failed to restart deployment") + } + + return nil +} + +func (n *Node) CustomLogic(instance *current.IBPOrderer, update Update) (*current.CRStatus, *common.Result, error) { + var status *current.CRStatus + var err error + return status, nil, err +} diff --git a/pkg/offering/base/orderer/node_test.go b/pkg/offering/base/orderer/node_test.go new file mode 100644 index 00000000..f9ccd366 --- /dev/null +++ b/pkg/offering/base/orderer/node_test.go @@ -0,0 +1,724 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package baseorderer_test + +import ( + "context" + "encoding/json" + "fmt" + "os" + "time" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + cmocks "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + commonapi "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/deployer" + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/orderer/v1" + v2 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/orderer/v2" + commonconfig "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/mspparser" + ordererinit "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer" + oconfig "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer/config/v1" + v2config "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer/config/v2" + managermocks "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/mocks" + baseorderer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer/mocks" + orderermocks "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer/mocks" + "github.com/IBM-Blockchain/fabric-operator/pkg/operatorerrors" + "github.com/IBM-Blockchain/fabric-operator/version" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" +) + +var _ = Describe("Base Orderer Node", func() { + var ( + node *baseorderer.Node + instance *current.IBPOrderer + mockKubeClient *cmocks.Client + + deploymentMgr *orderermocks.DeploymentManager + serviceMgr *managermocks.ResourceManager + pvcMgr *managermocks.ResourceManager + configMapMgr *managermocks.ResourceManager + + certificateMgr *orderermocks.CertificateManager + initializer *orderermocks.InitializeIBPOrderer + update *mocks.Update + cfg *config.Config + ) + + BeforeEach(func() { + mockKubeClient = &cmocks.Client{} + update = &mocks.Update{} + + replicas := int32(1) + instance = ¤t.IBPOrderer{ + Spec: current.IBPOrdererSpec{ + ExternalAddress: "address", + Domain: "domain", + HSM: ¤t.HSM{ + PKCS11Endpoint: "tcp://0.0.0.0:2346", + }, + Images: ¤t.OrdererImages{ + OrdererTag: "1.4.9-20200611", + }, + Replicas: &replicas, + FabricVersion: "1.4.9", + }, + } + instance.Kind = "IBPOrderer" + instance.Name = "orderer1" + instance.Namespace = "random" + nodeNumber := 1 + instance.Spec.NodeNumber = &nodeNumber + instance.Status.Version = version.Operator + + mockKubeClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj client.Object) error { + switch obj.(type) { + case *current.IBPOrderer: + o := obj.(*current.IBPOrderer) + o.Kind = "IBPOrderer" + instance = o + case *corev1.Service: + o := obj.(*corev1.Service) + o.Spec.Type = corev1.ServiceTypeNodePort + o.Spec.Ports = append(o.Spec.Ports, corev1.ServicePort{ + Name: "orderer-api", + TargetPort: intstr.IntOrString{ + IntVal: 7051, + }, + NodePort: int32(7051), + }) + case *corev1.Secret: + o := obj.(*corev1.Secret) + switch types.Name { + case "ecert-" + instance.Name + "-signcert": + o.Name = "ecert-" + instance.Name + "-signcert" + o.Namespace = instance.Namespace + o.Data = 
map[string][]byte{"cert.pem": []byte("LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNwVENDQWtxZ0F3SUJBZ0lSQU1FeVZVcDRMdlYydEFUREhlWklldDh3Q2dZSUtvWkl6ajBFQXdJd2daVXgKQ3pBSkJnTlZCQVlUQWxWVE1SY3dGUVlEVlFRSUV3NU9iM0owYUNCRFlYSnZiR2x1WVRFUE1BMEdBMVVFQnhNRwpSSFZ5YUdGdE1Rd3dDZ1lEVlFRS0V3TkpRazB4RXpBUkJnTlZCQXNUQ2tKc2IyTnJZMmhoYVc0eE9UQTNCZ05WCkJBTVRNR3BoYmpJeUxXOXlaR1Z5WlhKdmNtZGpZUzFqWVM1aGNIQnpMbkIxYldGekxtOXpMbVo1Y21VdWFXSnQKTG1OdmJUQWVGdzB5TURBeE1qSXhPREExTURCYUZ3MHpNREF4TVRreE9EQTFNREJhTUlHVk1Rc3dDUVlEVlFRRwpFd0pWVXpFWE1CVUdBMVVFQ0JNT1RtOXlkR2dnUTJGeWIyeHBibUV4RHpBTkJnTlZCQWNUQmtSMWNtaGhiVEVNCk1Bb0dBMVVFQ2hNRFNVSk5NUk13RVFZRFZRUUxFd3BDYkc5amEyTm9ZV2x1TVRrd053WURWUVFERXpCcVlXNHkKTWkxdmNtUmxjbVZ5YjNKblkyRXRZMkV1WVhCd2N5NXdkVzFoY3k1dmN5NW1lWEpsTG1saWJTNWpiMjB3V1RBVApCZ2NxaGtqT1BRSUJCZ2dxaGtqT1BRTUJCd05DQUFTR0lHUFkvZC9tQVhMejM4SlROR3F5bldpOTJXUVB6cnN0Cm5vdEFWZlh0dHZ5QWJXdTRNbWNUMEh6UnBTWjNDcGdxYUNXcTg1MUwyV09LcnZ6L0JPREpvM2t3ZHpCMUJnTlYKSFJFRWJqQnNnakJxWVc0eU1pMXZjbVJsY21WeWIzSm5ZMkV0WTJFdVlYQndjeTV3ZFcxaGN5NXZjeTVtZVhKbApMbWxpYlM1amIyMkNPR3BoYmpJeUxXOXlaR1Z5WlhKdmNtZGpZUzF2Y0dWeVlYUnBiMjV6TG1Gd2NITXVjSFZ0CllYTXViM011Wm5seVpTNXBZbTB1WTI5dE1Bb0dDQ3FHU000OUJBTUNBMGtBTUVZQ0lRQzM3Y1pkNFY2RThPQ1IKaDloQXEyK0dyR21FVTFQU0I1eHo5RkdEWThkODZRSWhBT1crM3Urb2d4bFNWNUoyR3ZYbHRaQmpXRkpvYnJxeApwVVQ4cW4yMDA1b0wKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo")} + case "ecert-" + instance.Name + "-keystore": + o.Name = "ecert-" + instance.Name + "-keystore" + o.Namespace = instance.Namespace + o.Data = map[string][]byte{"key.pem": []byte("LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNwVENDQWtxZ0F3SUJBZ0lSQU1FeVZVcDRMdlYydEFUREhlWklldDh3Q2dZSUtvWkl6ajBFQXdJd2daVXgKQ3pBSkJnTlZCQVlUQWxWVE1SY3dGUVlEVlFRSUV3NU9iM0owYUNCRFlYSnZiR2x1WVRFUE1BMEdBMVVFQnhNRwpSSFZ5YUdGdE1Rd3dDZ1lEVlFRS0V3TkpRazB4RXpBUkJnTlZCQXNUQ2tKc2IyTnJZMmhoYVc0eE9UQTNCZ05WCkJBTVRNR3BoYmpJeUxXOXlaR1Z5WlhKdmNtZGpZUzFqWVM1aGNIQnpMbkIxYldGekxtOXpMbVo1Y21VdWFXSnQKTG1OdmJUQWVGdzB5TURBeE1qSXhPREExTURCYUZ3MHpNREF4TVRreE9EQTFNREJhTUlHVk1Rc3dDUVlEVlFRRwpFd0pWVXpFWE1CVUdBMVVFQ0JNT1RtOXlkR2dnUTJGeWIyeHBibUV4RHpBTkJnTlZCQWNUQmtSMWNtaGhiVEVNCk1Bb0dBMVVFQ2hNRFNVSk5NUk13RVFZRFZRUUxFd3BDYkc5amEyTm9ZV2x1TVRrd053WURWUVFERXpCcVlXNHkKTWkxdmNtUmxjbVZ5YjNKblkyRXRZMkV1WVhCd2N5NXdkVzFoY3k1dmN5NW1lWEpsTG1saWJTNWpiMjB3V1RBVApCZ2NxaGtqT1BRSUJCZ2dxaGtqT1BRTUJCd05DQUFTR0lHUFkvZC9tQVhMejM4SlROR3F5bldpOTJXUVB6cnN0Cm5vdEFWZlh0dHZ5QWJXdTRNbWNUMEh6UnBTWjNDcGdxYUNXcTg1MUwyV09LcnZ6L0JPREpvM2t3ZHpCMUJnTlYKSFJFRWJqQnNnakJxWVc0eU1pMXZjbVJsY21WeWIzSm5ZMkV0WTJFdVlYQndjeTV3ZFcxaGN5NXZjeTVtZVhKbApMbWxpYlM1amIyMkNPR3BoYmpJeUxXOXlaR1Z5WlhKdmNtZGpZUzF2Y0dWeVlYUnBiMjV6TG1Gd2NITXVjSFZ0CllYTXViM011Wm5seVpTNXBZbTB1WTI5dE1Bb0dDQ3FHU000OUJBTUNBMGtBTUVZQ0lRQzM3Y1pkNFY2RThPQ1IKaDloQXEyK0dyR21FVTFQU0I1eHo5RkdEWThkODZRSWhBT1crM3Urb2d4bFNWNUoyR3ZYbHRaQmpXRkpvYnJxeApwVVQ4cW4yMDA1b0wKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo")} + case "tls-" + instance.Name + "-signcert": + o.Name = "ecert-" + instance.Name + "-signcert" + o.Namespace = instance.Namespace + o.Data = map[string][]byte{"cert.pem": 
[]byte("LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNwVENDQWtxZ0F3SUJBZ0lSQU1FeVZVcDRMdlYydEFUREhlWklldDh3Q2dZSUtvWkl6ajBFQXdJd2daVXgKQ3pBSkJnTlZCQVlUQWxWVE1SY3dGUVlEVlFRSUV3NU9iM0owYUNCRFlYSnZiR2x1WVRFUE1BMEdBMVVFQnhNRwpSSFZ5YUdGdE1Rd3dDZ1lEVlFRS0V3TkpRazB4RXpBUkJnTlZCQXNUQ2tKc2IyTnJZMmhoYVc0eE9UQTNCZ05WCkJBTVRNR3BoYmpJeUxXOXlaR1Z5WlhKdmNtZGpZUzFqWVM1aGNIQnpMbkIxYldGekxtOXpMbVo1Y21VdWFXSnQKTG1OdmJUQWVGdzB5TURBeE1qSXhPREExTURCYUZ3MHpNREF4TVRreE9EQTFNREJhTUlHVk1Rc3dDUVlEVlFRRwpFd0pWVXpFWE1CVUdBMVVFQ0JNT1RtOXlkR2dnUTJGeWIyeHBibUV4RHpBTkJnTlZCQWNUQmtSMWNtaGhiVEVNCk1Bb0dBMVVFQ2hNRFNVSk5NUk13RVFZRFZRUUxFd3BDYkc5amEyTm9ZV2x1TVRrd053WURWUVFERXpCcVlXNHkKTWkxdmNtUmxjbVZ5YjNKblkyRXRZMkV1WVhCd2N5NXdkVzFoY3k1dmN5NW1lWEpsTG1saWJTNWpiMjB3V1RBVApCZ2NxaGtqT1BRSUJCZ2dxaGtqT1BRTUJCd05DQUFTR0lHUFkvZC9tQVhMejM4SlROR3F5bldpOTJXUVB6cnN0Cm5vdEFWZlh0dHZ5QWJXdTRNbWNUMEh6UnBTWjNDcGdxYUNXcTg1MUwyV09LcnZ6L0JPREpvM2t3ZHpCMUJnTlYKSFJFRWJqQnNnakJxWVc0eU1pMXZjbVJsY21WeWIzSm5ZMkV0WTJFdVlYQndjeTV3ZFcxaGN5NXZjeTVtZVhKbApMbWxpYlM1amIyMkNPR3BoYmpJeUxXOXlaR1Z5WlhKdmNtZGpZUzF2Y0dWeVlYUnBiMjV6TG1Gd2NITXVjSFZ0CllYTXViM011Wm5seVpTNXBZbTB1WTI5dE1Bb0dDQ3FHU000OUJBTUNBMGtBTUVZQ0lRQzM3Y1pkNFY2RThPQ1IKaDloQXEyK0dyR21FVTFQU0I1eHo5RkdEWThkODZRSWhBT1crM3Urb2d4bFNWNUoyR3ZYbHRaQmpXRkpvYnJxeApwVVQ4cW4yMDA1b0wKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo")} + case "tls-" + instance.Name + "-keystore": + o.Name = "ecert-" + instance.Name + "-keystore" + o.Namespace = instance.Namespace + o.Data = map[string][]byte{"key.pem": []byte("LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNwVENDQWtxZ0F3SUJBZ0lSQU1FeVZVcDRMdlYydEFUREhlWklldDh3Q2dZSUtvWkl6ajBFQXdJd2daVXgKQ3pBSkJnTlZCQVlUQWxWVE1SY3dGUVlEVlFRSUV3NU9iM0owYUNCRFlYSnZiR2x1WVRFUE1BMEdBMVVFQnhNRwpSSFZ5YUdGdE1Rd3dDZ1lEVlFRS0V3TkpRazB4RXpBUkJnTlZCQXNUQ2tKc2IyTnJZMmhoYVc0eE9UQTNCZ05WCkJBTVRNR3BoYmpJeUxXOXlaR1Z5WlhKdmNtZGpZUzFqWVM1aGNIQnpMbkIxYldGekxtOXpMbVo1Y21VdWFXSnQKTG1OdmJUQWVGdzB5TURBeE1qSXhPREExTURCYUZ3MHpNREF4TVRreE9EQTFNREJhTUlHVk1Rc3dDUVlEVlFRRwpFd0pWVXpFWE1CVUdBMVVFQ0JNT1RtOXlkR2dnUTJGeWIyeHBibUV4RHpBTkJnTlZCQWNUQmtSMWNtaGhiVEVNCk1Bb0dBMVVFQ2hNRFNVSk5NUk13RVFZRFZRUUxFd3BDYkc5amEyTm9ZV2x1TVRrd053WURWUVFERXpCcVlXNHkKTWkxdmNtUmxjbVZ5YjNKblkyRXRZMkV1WVhCd2N5NXdkVzFoY3k1dmN5NW1lWEpsTG1saWJTNWpiMjB3V1RBVApCZ2NxaGtqT1BRSUJCZ2dxaGtqT1BRTUJCd05DQUFTR0lHUFkvZC9tQVhMejM4SlROR3F5bldpOTJXUVB6cnN0Cm5vdEFWZlh0dHZ5QWJXdTRNbWNUMEh6UnBTWjNDcGdxYUNXcTg1MUwyV09LcnZ6L0JPREpvM2t3ZHpCMUJnTlYKSFJFRWJqQnNnakJxWVc0eU1pMXZjbVJsY21WeWIzSm5ZMkV0WTJFdVlYQndjeTV3ZFcxaGN5NXZjeTVtZVhKbApMbWxpYlM1amIyMkNPR3BoYmpJeUxXOXlaR1Z5WlhKdmNtZGpZUzF2Y0dWeVlYUnBiMjV6TG1Gd2NITXVjSFZ0CllYTXViM011Wm5seVpTNXBZbTB1WTI5dE1Bb0dDQ3FHU000OUJBTUNBMGtBTUVZQ0lRQzM3Y1pkNFY2RThPQ1IKaDloQXEyK0dyR21FVTFQU0I1eHo5RkdEWThkODZRSWhBT1crM3Urb2d4bFNWNUoyR3ZYbHRaQmpXRkpvYnJxeApwVVQ4cW4yMDA1b0wKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo")} + case "tls-" + instance.Name + "-cacerts": + o.Name = "ecert-" + instance.Name + "-cacerts" + o.Namespace = instance.Namespace + o.Data = map[string][]byte{"key.pem": 
[]byte("LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNwVENDQWtxZ0F3SUJBZ0lSQU1FeVZVcDRMdlYydEFUREhlWklldDh3Q2dZSUtvWkl6ajBFQXdJd2daVXgKQ3pBSkJnTlZCQVlUQWxWVE1SY3dGUVlEVlFRSUV3NU9iM0owYUNCRFlYSnZiR2x1WVRFUE1BMEdBMVVFQnhNRwpSSFZ5YUdGdE1Rd3dDZ1lEVlFRS0V3TkpRazB4RXpBUkJnTlZCQXNUQ2tKc2IyTnJZMmhoYVc0eE9UQTNCZ05WCkJBTVRNR3BoYmpJeUxXOXlaR1Z5WlhKdmNtZGpZUzFqWVM1aGNIQnpMbkIxYldGekxtOXpMbVo1Y21VdWFXSnQKTG1OdmJUQWVGdzB5TURBeE1qSXhPREExTURCYUZ3MHpNREF4TVRreE9EQTFNREJhTUlHVk1Rc3dDUVlEVlFRRwpFd0pWVXpFWE1CVUdBMVVFQ0JNT1RtOXlkR2dnUTJGeWIyeHBibUV4RHpBTkJnTlZCQWNUQmtSMWNtaGhiVEVNCk1Bb0dBMVVFQ2hNRFNVSk5NUk13RVFZRFZRUUxFd3BDYkc5amEyTm9ZV2x1TVRrd053WURWUVFERXpCcVlXNHkKTWkxdmNtUmxjbVZ5YjNKblkyRXRZMkV1WVhCd2N5NXdkVzFoY3k1dmN5NW1lWEpsTG1saWJTNWpiMjB3V1RBVApCZ2NxaGtqT1BRSUJCZ2dxaGtqT1BRTUJCd05DQUFTR0lHUFkvZC9tQVhMejM4SlROR3F5bldpOTJXUVB6cnN0Cm5vdEFWZlh0dHZ5QWJXdTRNbWNUMEh6UnBTWjNDcGdxYUNXcTg1MUwyV09LcnZ6L0JPREpvM2t3ZHpCMUJnTlYKSFJFRWJqQnNnakJxWVc0eU1pMXZjbVJsY21WeWIzSm5ZMkV0WTJFdVlYQndjeTV3ZFcxaGN5NXZjeTVtZVhKbApMbWxpYlM1amIyMkNPR3BoYmpJeUxXOXlaR1Z5WlhKdmNtZGpZUzF2Y0dWeVlYUnBiMjV6TG1Gd2NITXVjSFZ0CllYTXViM011Wm5seVpTNXBZbTB1WTI5dE1Bb0dDQ3FHU000OUJBTUNBMGtBTUVZQ0lRQzM3Y1pkNFY2RThPQ1IKaDloQXEyK0dyR21FVTFQU0I1eHo5RkdEWThkODZRSWhBT1crM3Urb2d4bFNWNUoyR3ZYbHRaQmpXRkpvYnJxeApwVVQ4cW4yMDA1b0wKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo")} + case "ecert-" + instance.Name + "-cacerts": + o.Name = "ecert-" + instance.Name + "-cacerts" + o.Namespace = instance.Namespace + o.Data = map[string][]byte{"cacert-0.pem": []byte("LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNwVENDQWtxZ0F3SUJBZ0lSQU1FeVZVcDRMdlYydEFUREhlWklldDh3Q2dZSUtvWkl6ajBFQXdJd2daVXgKQ3pBSkJnTlZCQVlUQWxWVE1SY3dGUVlEVlFRSUV3NU9iM0owYUNCRFlYSnZiR2x1WVRFUE1BMEdBMVVFQnhNRwpSSFZ5YUdGdE1Rd3dDZ1lEVlFRS0V3TkpRazB4RXpBUkJnTlZCQXNUQ2tKc2IyTnJZMmhoYVc0eE9UQTNCZ05WCkJBTVRNR3BoYmpJeUxXOXlaR1Z5WlhKdmNtZGpZUzFqWVM1aGNIQnpMbkIxYldGekxtOXpMbVo1Y21VdWFXSnQKTG1OdmJUQWVGdzB5TURBeE1qSXhPREExTURCYUZ3MHpNREF4TVRreE9EQTFNREJhTUlHVk1Rc3dDUVlEVlFRRwpFd0pWVXpFWE1CVUdBMVVFQ0JNT1RtOXlkR2dnUTJGeWIyeHBibUV4RHpBTkJnTlZCQWNUQmtSMWNtaGhiVEVNCk1Bb0dBMVVFQ2hNRFNVSk5NUk13RVFZRFZRUUxFd3BDYkc5amEyTm9ZV2x1TVRrd053WURWUVFERXpCcVlXNHkKTWkxdmNtUmxjbVZ5YjNKblkyRXRZMkV1WVhCd2N5NXdkVzFoY3k1dmN5NW1lWEpsTG1saWJTNWpiMjB3V1RBVApCZ2NxaGtqT1BRSUJCZ2dxaGtqT1BRTUJCd05DQUFTR0lHUFkvZC9tQVhMejM4SlROR3F5bldpOTJXUVB6cnN0Cm5vdEFWZlh0dHZ5QWJXdTRNbWNUMEh6UnBTWjNDcGdxYUNXcTg1MUwyV09LcnZ6L0JPREpvM2t3ZHpCMUJnTlYKSFJFRWJqQnNnakJxWVc0eU1pMXZjbVJsY21WeWIzSm5ZMkV0WTJFdVlYQndjeTV3ZFcxaGN5NXZjeTVtZVhKbApMbWxpYlM1amIyMkNPR3BoYmpJeUxXOXlaR1Z5WlhKdmNtZGpZUzF2Y0dWeVlYUnBiMjV6TG1Gd2NITXVjSFZ0CllYTXViM011Wm5seVpTNXBZbTB1WTI5dE1Bb0dDQ3FHU000OUJBTUNBMGtBTUVZQ0lRQzM3Y1pkNFY2RThPQ1IKaDloQXEyK0dyR21FVTFQU0I1eHo5RkdEWThkODZRSWhBT1crM3Urb2d4bFNWNUoyR3ZYbHRaQmpXRkpvYnJxeApwVVQ4cW4yMDA1b0wKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo")} + } + } + return nil + } + + deploymentMgr = &orderermocks.DeploymentManager{} + serviceMgr = &managermocks.ResourceManager{} + pvcMgr = &managermocks.ResourceManager{} + configMapMgr = &managermocks.ResourceManager{} + roleMgr := &managermocks.ResourceManager{} + roleBindingMgr := &managermocks.ResourceManager{} + serviceAccountMgr := &managermocks.ResourceManager{} + + initializer = &orderermocks.InitializeIBPOrderer{} + initializer.GetInitOrdererReturns(&ordererinit.Orderer{}, nil) + + certificateMgr = &orderermocks.CertificateManager{} + restartMgr := &orderermocks.RestartManager{} + + cfg = &config.Config{ + OrdererInitConfig: &ordererinit.Config{ + ConfigTxFile: "../../../../defaultconfig/orderer/configtx.yaml", + OUFile: 
"../../../../defaultconfig/orderer/ouconfig.yaml", + OrdererFile: "../../../../defaultconfig/orderer/orderer.yaml", + }, + Operator: config.Operator{ + Versions: &deployer.Versions{ + Orderer: map[string]deployer.VersionOrderer{ + "1.4.9-0": { + Default: true, + Image: deployer.OrdererImages{ + OrdererImage: "ordererimage", + OrdererTag: "1.4.9-amd64", + OrdererInitImage: "ordererinitimage", + OrdererInitTag: "1.4.9-amd64", + }, + }, + }, + }, + }, + } + + node = &baseorderer.Node{ + Client: mockKubeClient, + Scheme: &runtime.Scheme{}, + Config: cfg, + + DeploymentManager: deploymentMgr, + ServiceManager: serviceMgr, + EnvConfigMapManager: configMapMgr, + PVCManager: pvcMgr, + RoleManager: roleMgr, + RoleBindingManager: roleBindingMgr, + ServiceAccountManager: serviceAccountMgr, + + CertificateManager: certificateMgr, + RenewCertTimers: make(map[string]*time.Timer), + Initializer: initializer, + Restart: restartMgr, + } + }) + + Context("pre reconcile checks", func() { + Context("version and images", func() { + Context("create CR", func() { + It("returns an error if fabric version is not set in spec", func() { + instance.Spec.FabricVersion = "" + _, err := node.PreReconcileChecks(instance, update) + Expect(err).To(MatchError(ContainSubstring("fabric version is not set"))) + }) + + Context("images section blank", func() { + BeforeEach(func() { + instance.Spec.Images = nil + }) + + It("normalizes fabric version and requests a requeue", func() { + instance.Spec.FabricVersion = "1.4.9" + requeue, err := node.PreReconcileChecks(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(requeue).To(Equal(true)) + Expect(instance.Spec.FabricVersion).To(Equal("1.4.9-0")) + }) + + It("returns an error if fabric version not supported", func() { + instance.Spec.FabricVersion = "0.0.1" + _, err := node.PreReconcileChecks(instance, update) + Expect(err).To(MatchError(ContainSubstring("fabric version '0.0.1' is not supported"))) + }) + + When("version is passed without hyphen", func() { + BeforeEach(func() { + instance.Spec.FabricVersion = "1.4.9" + }) + + It("finds default version for release and updates images section", func() { + requeue, err := node.PreReconcileChecks(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(requeue).To(Equal(true)) + Expect(*instance.Spec.Images).To(Equal(current.OrdererImages{ + OrdererImage: "ordererimage", + OrdererTag: "1.4.9-amd64", + OrdererInitImage: "ordererinitimage", + OrdererInitTag: "1.4.9-amd64", + })) + }) + }) + + When("version is passed with hyphen", func() { + BeforeEach(func() { + instance.Spec.FabricVersion = "1.4.9-0" + }) + + It("looks images and updates images section", func() { + requeue, err := node.PreReconcileChecks(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(requeue).To(Equal(true)) + Expect(*instance.Spec.Images).To(Equal(current.OrdererImages{ + OrdererImage: "ordererimage", + OrdererTag: "1.4.9-amd64", + OrdererInitImage: "ordererinitimage", + OrdererInitTag: "1.4.9-amd64", + })) + }) + }) + }) + + Context("images section passed", func() { + BeforeEach(func() { + instance.Spec.Images = ¤t.OrdererImages{ + OrdererImage: "ghcr.io/ibm-blockchain/ordererimage", + OrdererTag: "2.0.0", + OrdererInitImage: "ghcr.io/ibm-blockchain/ordererinitimage", + OrdererInitTag: "2.0.0", + } + }) + + When("version is not passed", func() { + BeforeEach(func() { + instance.Spec.FabricVersion = "" + }) + + It("returns an error", func() { + _, err := node.PreReconcileChecks(instance, update) + 
Expect(err).To(MatchError(ContainSubstring("fabric version is not set"))) + }) + }) + + When("version is passed", func() { + BeforeEach(func() { + instance.Spec.FabricVersion = "2.0.0-8" + }) + + It("persists current spec configuration", func() { + requeue, err := node.PreReconcileChecks(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(requeue).To(Equal(false)) + Expect(instance.Spec.FabricVersion).To(Equal("2.0.0-8")) + Expect(*instance.Spec.Images).To(Equal(current.OrdererImages{ + OrdererImage: "ghcr.io/ibm-blockchain/ordererimage", + OrdererTag: "2.0.0", + OrdererInitImage: "ghcr.io/ibm-blockchain/ordererinitimage", + OrdererInitTag: "2.0.0", + })) + }) + }) + }) + }) + + Context("update CR", func() { + BeforeEach(func() { + instance.Spec.FabricVersion = "2.0.1-0" + instance.Spec.Images = ¤t.OrdererImages{ + OrdererImage: "ghcr.io/ibm-blockchain/ordererimage", + OrdererTag: "2.0.1", + OrdererInitImage: "ghcr.io/ibm-blockchain/ordererinitimage", + OrdererInitTag: "2.0.1", + } + }) + + When("images updated", func() { + BeforeEach(func() { + update.ImagesUpdatedReturns(true) + instance.Spec.Images = ¤t.OrdererImages{ + OrdererImage: "ghcr.io/ibm-blockchain/ordererimage", + OrdererTag: "2.0.8", + OrdererInitImage: "ghcr.io/ibm-blockchain/ordererinitimage", + OrdererInitTag: "2.0.8", + } + }) + + Context("and version updated", func() { + BeforeEach(func() { + update.FabricVersionUpdatedReturns(true) + instance.Spec.FabricVersion = "2.0.1-8" + }) + + It("persists current spec configuration", func() { + requeue, err := node.PreReconcileChecks(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(requeue).To(Equal(false)) + Expect(instance.Spec.FabricVersion).To(Equal("2.0.1-8")) + Expect(*instance.Spec.Images).To(Equal(current.OrdererImages{ + OrdererImage: "ghcr.io/ibm-blockchain/ordererimage", + OrdererTag: "2.0.8", + OrdererInitImage: "ghcr.io/ibm-blockchain/ordererinitimage", + OrdererInitTag: "2.0.8", + })) + }) + }) + + Context("and version not updated", func() { + It("persists current spec configuration", func() { + requeue, err := node.PreReconcileChecks(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(requeue).To(Equal(false)) + Expect(instance.Spec.FabricVersion).To(Equal("2.0.1-0")) + Expect(*instance.Spec.Images).To(Equal(current.OrdererImages{ + OrdererImage: "ghcr.io/ibm-blockchain/ordererimage", + OrdererTag: "2.0.8", + OrdererInitImage: "ghcr.io/ibm-blockchain/ordererinitimage", + OrdererInitTag: "2.0.8", + })) + }) + }) + }) + + When("images not updated", func() { + Context("and version updated during operator migration", func() { + BeforeEach(func() { + update.FabricVersionUpdatedReturns(true) + instance.Spec.FabricVersion = "unsupported" + }) + + It("persists current spec configuration", func() { + requeue, err := node.PreReconcileChecks(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(requeue).To(Equal(false)) + Expect(instance.Spec.FabricVersion).To(Equal("unsupported")) + Expect(*instance.Spec.Images).To(Equal(current.OrdererImages{ + OrdererImage: "ghcr.io/ibm-blockchain/ordererimage", + OrdererTag: "2.0.1", + OrdererInitImage: "ghcr.io/ibm-blockchain/ordererinitimage", + OrdererInitTag: "2.0.1", + })) + }) + }) + + Context("and version updated (not during operator migration)", func() { + BeforeEach(func() { + update.FabricVersionUpdatedReturns(true) + }) + + When("using non-hyphenated version", func() { + BeforeEach(func() { + instance.Spec.FabricVersion = "1.4.9" + }) + + It("looks images and updates images section", 
func() { + requeue, err := node.PreReconcileChecks(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(requeue).To(Equal(true)) + Expect(instance.Spec.FabricVersion).To(Equal("1.4.9-0")) + Expect(*instance.Spec.Images).To(Equal(current.OrdererImages{ + OrdererImage: "ordererimage", + OrdererTag: "1.4.9-amd64", + OrdererInitImage: "ordererinitimage", + OrdererInitTag: "1.4.9-amd64", + })) + }) + }) + + When("using hyphenated version", func() { + BeforeEach(func() { + instance.Spec.FabricVersion = "1.4.9-0" + }) + + It("looks images and updates images section", func() { + requeue, err := node.PreReconcileChecks(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(requeue).To(Equal(true)) + Expect(instance.Spec.FabricVersion).To(Equal("1.4.9-0")) + Expect(*instance.Spec.Images).To(Equal(current.OrdererImages{ + OrdererImage: "ordererimage", + OrdererTag: "1.4.9-amd64", + OrdererInitImage: "ordererinitimage", + OrdererInitTag: "1.4.9-amd64", + })) + }) + }) + }) + }) + }) + }) + + Context("hsm image updates", func() { + var ( + hsmConfig = &commonconfig.HSMConfig{ + Library: commonconfig.Library{ + Image: "ghcr.io/ibm-blockchain/hsmimage:1.0.0", + }, + } + ) + + BeforeEach(func() { + mockKubeClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj client.Object) error { + switch obj.(type) { + case *corev1.ConfigMap: + o := obj.(*corev1.ConfigMap) + + bytes, err := yaml.Marshal(hsmConfig) + Expect(err).NotTo(HaveOccurred()) + + o.Data = map[string]string{ + "ibp-hsm-config.yaml": string(bytes), + } + } + return nil + } + }) + + It("updates hsm image and tag if passed through operator config", func() { + updated, err := node.PreReconcileChecks(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(updated).To(Equal(true)) + Expect(instance.Spec.Images.HSMImage).To(Equal("ghcr.io/ibm-blockchain/hsmimage")) + Expect(instance.Spec.Images.HSMTag).To(Equal("1.0.0")) + }) + + It("doesn't update hsm image and tag if hsm update is disabled", func() { + hsmConfig.Library.AutoUpdateDisabled = true + + updated, err := node.PreReconcileChecks(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(updated).To(Equal(false)) + Expect(instance.Spec.Images.HSMImage).To(Equal("")) + Expect(instance.Spec.Images.HSMTag).To(Equal("")) + }) + }) + }) + + Context("Reconciles", func() { + It("returns nil and will requeue update request if instance version is updated", func() { + instance.Status.Version = "" + _, err := node.Reconcile(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(mockKubeClient.PatchStatusCallCount()).To(Equal(1)) + }) + It("returns a breaking error if initialization fails", func() { + cfg.OrdererInitConfig.OrdererFile = "../../../../defaultconfig/orderer/badfile.yaml" + node.Initializer = ordererinit.New(nil, nil, cfg.OrdererInitConfig, "", nil) + _, err := node.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("Code: 21 - failed to initialize orderer node")) + Expect(operatorerrors.IsBreakingError(err, "msg", nil)).NotTo(HaveOccurred()) + }) + + It("returns an error for invalid HSM endpoint", func() { + instance.Spec.HSM.PKCS11Endpoint = "tcp://:2346" + _, err := node.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(fmt.Sprintf("failed pre reconcile checks: invalid HSM endpoint for orderer instance '%s': missing IP address", instance.Name))) + }) + + It("returns an error domain is not set", func() { + instance.Spec.Domain = "" + _, err 
:= node.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(fmt.Sprintf("failed pre reconcile checks: domain not set for orderer instance '%s'", instance.Name))) + }) + + It("returns an error if pvc manager fails to reconcile", func() { + pvcMgr.ReconcileReturns(errors.New("failed to reconcile pvc")) + _, err := node.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed PVC reconciliation: failed to reconcile pvc")) + }) + + It("returns an error if service manager fails to reconcile", func() { + serviceMgr.ReconcileReturns(errors.New("failed to reconcile service")) + _, err := node.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed Service reconciliation: failed to reconcile service")) + }) + + It("returns an error if config map manager fails to reconcile", func() { + configMapMgr.ReconcileReturns(errors.New("failed to reconcile config map")) + _, err := node.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed Env ConfigMap reconciliation: failed to reconcile config map")) + }) + + It("returns an error if deployment manager fails to reconcile", func() { + deploymentMgr.ReconcileReturns(errors.New("failed to reconcile deployment")) + _, err := node.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed Deployment reconciliation: failed to reconcile deployment")) + }) + + It("reconciles IBPOrderer", func() { + _, err := node.Reconcile(instance, update) + Expect(err).NotTo(HaveOccurred()) + }) + + }) + + Context("fabric orderer migration", func() { + BeforeEach(func() { + overrides := &oconfig.Orderer{ + Orderer: v1.Orderer{ + General: v1.General{ + BCCSP: &commonapi.BCCSP{ + ProviderName: "pkcs11", + PKCS11: &commonapi.PKCS11Opts{ + FileKeyStore: &commonapi.FileKeyStoreOpts{ + KeyStorePath: "msp/keystore", + }, + }, + }, + }, + }, + } + bytes, err := json.Marshal(overrides) + Expect(err).NotTo(HaveOccurred()) + + instance.Spec.ConfigOverride = &runtime.RawExtension{Raw: bytes} + + coreBytes, err := yaml.Marshal(overrides) + Expect(err).NotTo(HaveOccurred()) + + cm := &corev1.ConfigMap{ + BinaryData: map[string][]byte{ + "orderer.yaml": coreBytes, + }, + } + initializer.GetConfigFromConfigMapReturns(cm, nil) + }) + + When("fabric orderer tag is less than 1.4.7", func() { + BeforeEach(func() { + instance.Spec.Images.OrdererTag = "1.4.6-20200611" + }) + + It("returns without updating config", func() { + ordererConfig, err := node.FabricOrdererMigration(instance) + Expect(err).NotTo(HaveOccurred()) + Expect(ordererConfig).To(BeNil()) + }) + }) + + When("hsm is not enabled", func() { + BeforeEach(func() { + overrides := &oconfig.Orderer{ + Orderer: v1.Orderer{ + General: v1.General{ + BCCSP: &commonapi.BCCSP{ + ProviderName: "sw", + PKCS11: &commonapi.PKCS11Opts{ + FileKeyStore: &commonapi.FileKeyStoreOpts{ + KeyStorePath: "msp/keystore", + }, + }, + }, + }, + }, + } + bytes, err := json.Marshal(overrides) + Expect(err).NotTo(HaveOccurred()) + + instance.Spec.ConfigOverride = &runtime.RawExtension{Raw: bytes} + }) + + It("returns without updating config", func() { + ordererConfig, err := node.FabricOrdererMigration(instance) + Expect(err).NotTo(HaveOccurred()) + Expect(ordererConfig).To(BeNil()) + }) + }) + + It("removes keystore path value", func() { + 
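// The surrounding BeforeEach enables PKCS11 in the config override, so the migration is expected to clear BCCSP.PKCS11.FileKeyStore; a file keystore path is not needed once keys are held in an HSM. +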
ordererConfig, err := node.FabricOrdererMigration(instance) + Expect(err).NotTo(HaveOccurred()) + Expect(ordererConfig.General.BCCSP.PKCS11.FileKeyStore).To(BeNil()) + }) + }) + + Context("initialize", func() { + BeforeEach(func() { + config := v2config.Orderer{ + Orderer: v2.Orderer{ + General: v2.General{ + BCCSP: &commonapi.BCCSP{ + ProviderName: "PKCS11", + }, + }, + }, + } + + bytes, err := json.Marshal(config) + Expect(err).NotTo(HaveOccurred()) + + instance.Spec.ConfigOverride = &runtime.RawExtension{Raw: bytes} + }) + + It("sets PKCS11_PROXY_SOCKET environment variable", func() { + err := node.Initialize(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(os.Getenv("PKCS11_PROXY_SOCKET")).To(Equal("tcp://0.0.0.0:2346")) + }) + + }) + + Context("update connection profile", func() { + It("returns error if fails to get cert", func() { + mockKubeClient.GetReturns(errors.New("get error")) + err := node.UpdateConnectionProfile(instance) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("get error")) + }) + + It("updates connection profile cm", func() { + err := node.UpdateConnectionProfile(instance) + Expect(err).NotTo(HaveOccurred()) + Expect(mockKubeClient.GetCallCount()).To(Equal(7)) + }) + }) + + Context("update msp certificates", func() { + const testcert = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNpVENDQWkrZ0F3SUJBZ0lVRkd3N0RjK0QvZUoyY08wOHd6d2tialIzK1M4d0NnWUlLb1pJemowRUF3SXcKYURFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJrd0Z3WURWUVFERXhCbVlXSnlhV010ClkyRXRjMlZ5ZG1WeU1CNFhEVEU1TVRBd09URTBNakF3TUZvWERUSXdNVEF3T0RFME1qQXdNRm93YnpFTE1Ba0cKQTFVRUJoTUNWVk14RnpBVkJnTlZCQWdURGs1dmNuUm9JRU5oY205c2FXNWhNUlF3RWdZRFZRUUtFd3RJZVhCbApjbXhsWkdkbGNqRVBNQTBHQTFVRUN4TUdSbUZpY21sak1TQXdIZ1lEVlFRREV4ZFRZV0ZrY3kxTllXTkNiMjlyCkxWQnlieTVzYjJOaGJEQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJBK0JBRzhZakJvTllabGgKRjFrVHNUbHd6VERDQTJocDhZTXI5Ky8vbEd0NURoSGZVT1c3bkhuSW1USHlPRjJQVjFPcVRuUWhUbWpLYTdaQwpqeU9BUWxLamdhOHdnYXd3RGdZRFZSMFBBUUgvQkFRREFnT29NQjBHQTFVZEpRUVdNQlFHQ0NzR0FRVUZCd01CCkJnZ3JCZ0VGQlFjREFqQU1CZ05WSFJNQkFmOEVBakFBTUIwR0ExVWREZ1FXQkJTbHJjL0lNQkxvMzR0UktvWnEKNTQreDIyYWEyREFmQmdOVkhTTUVHREFXZ0JSWmpxT3RQZWJzSFI2UjBNQUhrNnd4ei85UFZqQXRCZ05WSFJFRQpKakFrZ2hkVFlXRmtjeTFOWVdOQ2IyOXJMVkJ5Ynk1c2IyTmhiSUlKYkc5allXeG9iM04wTUFvR0NDcUdTTTQ5CkJBTUNBMGdBTUVVQ0lRRGR0Y1QwUE9FQXJZKzgwdEhmWUwvcXBiWWoxMGU2eWlPWlpUQ29wY25mUVFJZ1FNQUQKaFc3T0NSUERNd3lqKzNhb015d2hFenFHYy9jRDJSU2V5ekRiRjFFPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==" + + BeforeEach(func() { + msp := ¤t.MSP{ + SignCerts: testcert, + CACerts: []string{testcert}, + KeyStore: "keystore", + } + + initializer.GetUpdatedOrdererReturns(&ordererinit.Orderer{ + Cryptos: &commonconfig.Cryptos{ + TLS: &mspparser.MSPParser{ + Config: msp, + }, + }, + }, nil) + + }) + + It("returns error if fails to get update msp parsers", func() { + initializer.GetUpdatedOrdererReturns(nil, errors.New("get error")) + err := node.UpdateMSPCertificates(instance) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("get error")) + }) + + It("returns error if fails to generate crypto", func() { + initializer.GetUpdatedOrdererReturns(&ordererinit.Orderer{ + Cryptos: &commonconfig.Cryptos{ + TLS: &mspparser.MSPParser{ + Config: ¤t.MSP{ + SignCerts: "invalid", + }, + }, + }, + }, nil) + err := node.UpdateMSPCertificates(instance) + Expect(err).To(HaveOccurred()) + }) + + It("returns error if fails to update tls secrets", func() { + 
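// Failing the second UpdateSecrets call on the fake (index 1) simulates the TLS secret update failing; UpdateMSPCertificates is expected to surface this as a tls secrets error. +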
initializer.UpdateSecretsReturnsOnCall(1, errors.New("update error")) + err := node.UpdateMSPCertificates(instance) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to update tls secrets: update error")) + }) + + It("updates secrets of certificates passed through MSP spec", func() { + err := node.UpdateMSPCertificates(instance) + Expect(err).NotTo(HaveOccurred()) + Expect(initializer.UpdateSecretsCallCount()).To(Equal(3)) + }) + }) +}) diff --git a/pkg/offering/base/orderer/orderer.go b/pkg/offering/base/orderer/orderer.go new file mode 100644 index 00000000..094d3c62 --- /dev/null +++ b/pkg/offering/base/orderer/orderer.go @@ -0,0 +1,883 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package baseorderer + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + orderer "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer/config/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer/configtx" + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + resourcemanager "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/manager" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/operatorerrors" + "github.com/IBM-Blockchain/fabric-operator/pkg/restart" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + "github.com/IBM-Blockchain/fabric-operator/version" + "github.com/gogo/protobuf/proto" + "github.com/hyperledger/fabric-protos-go/msp" + "github.com/hyperledger/fabric-protos-go/orderer/etcdraft" + "github.com/hyperledger/fabric/bccsp" + fmsp "github.com/hyperledger/fabric/msp" + "github.com/pkg/errors" + "gopkg.in/yaml.v2" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var log = logf.Log.WithName("base_orderer") + +const ( + defaultOrdererNode = "./definitions/orderer/orderernode.yaml" +) + +//go:generate counterfeiter -o mocks/node_manager.go -fake-name NodeManager . 
NodeManager + +type NodeManager interface { + GetNode(int, map[string]*time.Timer, RestartManager) *Node +} + +var _ IBPOrderer = &Orderer{} + +type Orderer struct { + Client k8sclient.Client + Scheme *runtime.Scheme + Config *config.Config + + NodeManager NodeManager + OrdererNodeManager resources.Manager + + Override Override + RenewCertTimers map[string]*time.Timer + RestartManager *restart.RestartManager +} + +func New(client k8sclient.Client, scheme *runtime.Scheme, config *config.Config, o Override) *Orderer { + orderer := &Orderer{ + Client: client, + Scheme: scheme, + Config: config, + NodeManager: &Manager{ + Client: client, + Scheme: scheme, + Config: config, + }, + Override: o, + RenewCertTimers: make(map[string]*time.Timer), + RestartManager: restart.New(client, config.Operator.Restart.WaitTime.Get(), config.Operator.Restart.Timeout.Get()), + } + orderer.CreateManagers() + return orderer +} + +func (o *Orderer) CreateManagers() { + resourceManager := resourcemanager.New(o.Client, o.Scheme) + o.OrdererNodeManager = resourceManager.CreateOrderernodeManager("", o.Override.OrdererNode, o.GetLabels, defaultOrdererNode) +} + +func (o *Orderer) PreReconcileChecks(instance *current.IBPOrderer, update Update) (bool, error) { + if strings.ToLower(instance.Spec.OrdererType) != "etcdraft" { + return false, operatorerrors.New(operatorerrors.InvalidOrdererType, fmt.Sprintf("orderer type '%s' is not supported", instance.Spec.OrdererType)) + } + + size := instance.Spec.ClusterSize + if instance.Spec.NodeNumber == nil && instance.Spec.ClusterLocation != nil && instance.Spec.ClusterSize != 0 && len(instance.Spec.ClusterLocation) != size { + return false, operatorerrors.New(operatorerrors.InvalidOrdererType, "Number of Cluster Node Locations does not match cluster size") + } + + if instance.Spec.NodeNumber == nil && instance.Spec.ClusterSecret == nil { + return false, operatorerrors.New(operatorerrors.InvalidOrdererType, "Cluster MSP Secrets should be passed") + } + + if instance.Spec.NodeNumber == nil && instance.Spec.ClusterSecret != nil && instance.Spec.ClusterSize != 0 && len(instance.Spec.ClusterSecret) != size { + return false, operatorerrors.New(operatorerrors.InvalidOrdererType, "Number of Cluster MSP Secrets does not match cluster size") + } + + if instance.Spec.NodeNumber == nil && instance.Spec.ClusterConfigOverride != nil && instance.Spec.ClusterSize != 0 && len(instance.Spec.ClusterConfigOverride) != size { + return false, operatorerrors.New(operatorerrors.InvalidOrdererType, "Number of Cluster Override does not match cluster size") + } + + var maxNameLength *int + if instance.Spec.ConfigOverride != nil { + override := &orderer.OrdererOverrides{} + err := json.Unmarshal(instance.Spec.ConfigOverride.Raw, override) + if err != nil { + return false, err + } + maxNameLength = override.MaxNameLength + } + + err := util.ValidationChecks(instance.TypeMeta, instance.ObjectMeta, "IBPOrderer", maxNameLength) + if err != nil { + return false, err + } + + sizeUpdate := o.ClusterSizeUpdate(instance) + if sizeUpdate { + log.Info("Updating instance with default cluster size of 1") + err = o.Client.Patch(context.TODO(), instance, nil, k8sclient.PatchOption{ + Resilient: &k8sclient.ResilientPatch{ + Retry: 3, + Into: &current.IBPOrderer{}, + Strategy: client.MergeFrom, + }, + }) + if err != nil { + return false, err + } + return true, nil + } + + return false, nil +} + +func (o *Orderer) ClusterSizeUpdate(instance *current.IBPOrderer) bool { + size := instance.Spec.ClusterSize + if size == 0 { + 
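// A ClusterSize of 0 means the spec did not set a size; default to a single-node cluster and return true so PreReconcileChecks patches the instance. +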
instance.Spec.ClusterSize = 1 + return true + } + + return false +} + +func (o *Orderer) Initialize(instance *current.IBPOrderer, update Update) error { + // NO-OP + return nil +} + +func (o *Orderer) ReconcileManagers(instance *current.IBPOrderer, update Update, genesisBlock []byte) error { + var b64GenesisBlock string + + b64GenesisBlock = util.BytesToBase64(genesisBlock) + + for k := 0; k < instance.Spec.ClusterSize; k++ { + nodenumber := k + 1 + nodeinstance := instance.DeepCopy() + nodeinstance.Spec.NodeNumber = &nodenumber + nodeinstance.Spec.ClusterSize = 1 + nodeinstance.Spec.GenesisBlock = b64GenesisBlock + if len(instance.Spec.ClusterConfigOverride) != 0 { + nodeinstance.Spec.ConfigOverride = instance.Spec.ClusterConfigOverride[k] + } + nodeinstance.Spec.Secret = instance.Spec.ClusterSecret[k] + err := o.OrdererNodeManager.Reconcile(nodeinstance, false) + if err != nil { + return err + } + } + + return nil +} + +func (o *Orderer) UpdateNodesWithGenesisBlock(genesisBlock string, nodes []current.IBPOrderer) error { + log.Info("Updating nodes with genesis block if missing") + + for _, node := range nodes { + if node.Spec.GenesisBlock == "" { + log.Info(fmt.Sprintf("Updating node '%s'", node.Name)) + + node.Spec.GenesisBlock = genesisBlock + nodeRef := node + err := o.Client.Patch(context.TODO(), &nodeRef, nil, k8sclient.PatchOption{ + Resilient: &k8sclient.ResilientPatch{ + Retry: 3, + Into: &current.IBPOrderer{}, + Strategy: client.MergeFrom, + }, + }) + if err != nil { + return err + } + } + } + + return nil +} + +func (o *Orderer) Reconcile(instance *current.IBPOrderer, update Update) (common.Result, error) { + return common.Result{}, errors.New("base orderer reconcile not implemented, needs to be implemented by offering") +} + +func (o *Orderer) ReconcileCluster(instance *current.IBPOrderer, update Update, addHostPortToProfile func(*configtx.Profile, *current.IBPOrderer) error) (common.Result, error) { + log.Info(fmt.Sprintf("Reconciling Orderer Cluster %s", instance.GetName())) + var err error + + size := instance.Spec.ClusterSize + nodes, err := o.GetClusterNodes(instance) + if err != nil { + return common.Result{}, err + } + + if len(nodes.Items) == size { + if instance.Spec.IsPrecreateOrderer() { + return common.Result{}, err + } + } + + for _, node := range nodes.Items { + log.Info(fmt.Sprintf("GetClusterNodes returned node '%s'", node.Name)) + } + + log.Info(fmt.Sprintf("Size of cluster (number of nodes): %d", size)) + + var genesisBlock []byte + if len(nodes.Items) == size && !instance.Spec.IsUsingChannelLess() { + // Wait till all nodes are in precreated state before generating genesis block. + // Once in precreate state, the TLS certs and service should exist for genesis + // block creation + deployedNodes := 0 + for _, node := range nodes.Items { + if node.Status.Type == current.Deployed { + deployedNodes++ + } else { + log.Info(fmt.Sprintf("Node '%s' hasn't deployed yet, checking if in precreated state", node.GetName())) + if node.Status.Type != current.Precreated { + log.Info(fmt.Sprintf("Node '%s' hasn't entered precreated state, requeue request, another check to be made at next reconcile", node.GetName())) + return common.Result{ + Result: reconcile.Result{ + Requeue: true, + }, + }, nil + } + } + } + + // If all nodes are in deployed state and the parent hasn't deployed yet, ensure that + // all genesis secrets are found. 
If all required genesis secrets are not present + // continue with generating secrets + genesisSecretsFound := 0 + for _, node := range nodes.Items { + + nn := types.NamespacedName{ + Name: node.Name + "-genesis", + Namespace: node.Namespace, + } + + err := o.Client.Get(context.TODO(), nn, &corev1.Secret{}) + if err == nil { + genesisSecretsFound++ + } + } + + // If all genesis secrets found, nothing left to do by parent of cluster nodes + if genesisSecretsFound == len(nodes.Items) { + return common.Result{}, nil + } + + log.Info(fmt.Sprintf("All nodes have been precreated by cluster reconcile for parent: %s", instance.GetName())) + + genesisBlock, err = o.GenerateGenesisBlock(instance, addHostPortToProfile) + if err != nil { + return common.Result{}, err + } + + log.Info(fmt.Sprintf("Finished generating genesis block for cluster '%s'", instance.GetName())) + + b64GenesisBlock := util.BytesToBase64(genesisBlock) + err = o.UpdateNodesWithGenesisBlock(b64GenesisBlock, nodes.Items) + if err != nil { + return common.Result{}, err + } + + err = o.GenerateGenesisSecretForNodes(genesisBlock, nodes.Items) + if err != nil { + return common.Result{}, err + } + + log.Info("Finished generating genesis secrets") + + return common.Result{}, err + } + + if instance.Status.Type == "" || instance.Status.Type == current.Deploying { + for i := 1; i <= size; i++ { + err := o.CreateNodeCR(instance, i) + if err != nil { + return common.Result{}, err + } + } + } + + if !version.String(instance.Status.Version).Equal(version.Operator) { + log.Info(fmt.Sprintf("[Reconcile cluster] Setting version to %s for instance %s", version.Operator, instance.Name)) + instance.Status.Version = version.Operator + err = o.PatchStatus(instance) + if err != nil { + return common.Result{}, err + } + } + + return common.Result{}, nil +} + +func (o *Orderer) GenerateGenesisSecretForNodes(genesisBlock []byte, nodes []current.IBPOrderer) error { + log.Info("Generating genesis secret for all nodes") + + for _, node := range nodes { + log.Info(fmt.Sprintf("Processing node '%s' for genesis secret", node.Name)) + s := &corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Name: node.Name + "-genesis", + Namespace: node.Namespace, + Labels: node.GetLabels(), + }, + Data: map[string][]byte{ + "orderer.block": genesisBlock, + }, + } + + log.Info(fmt.Sprintf("Creating secret '%s'", s.Name)) + nodeRef := node + err := o.Client.Create(context.TODO(), s, k8sclient.CreateOption{Owner: &nodeRef, Scheme: o.Scheme}) + if err != nil { + return errors.Wrap(err, "failed to create orderer node's genesis secret") + } + } + + return nil +} + +func (o *Orderer) SetVersion(instance *current.IBPOrderer) (bool, error) { + if instance.Status.Version == "" || !version.String(instance.Status.Version).Equal(version.Operator) { + log.Info("Version of Operator: ", "version", version.Operator) + log.Info(fmt.Sprintf("Version of CR '%s': %s", instance.GetName(), instance.Status.Version)) + log.Info(fmt.Sprintf("Setting '%s' to version '%s'", instance.Name, version.Operator)) + + instance.Status.Version = version.Operator + err := o.PatchStatus(instance) + if err != nil { + return false, err + } + return true, nil + } + return false, nil +} + +func (o *Orderer) GetClusterNodes(instance *current.IBPOrderer) (current.IBPOrdererList, error) { + ordererList := current.IBPOrdererList{} + + labelSelector, err := labels.Parse(fmt.Sprintf("parent=%s", instance.GetName())) + if err != nil { + return ordererList, errors.Wrap(err, "failed to parse selector for parent name") + } + + 
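// Node CRs created by CreateNodeCR carry a parent=<cluster instance name> label, so listing on that selector returns every node belonging to this cluster. +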
listOptions := &client.ListOptions{ + LabelSelector: labelSelector, + Namespace: instance.GetNamespace(), + } + + err = o.Client.List(context.TODO(), &ordererList, listOptions) + if err != nil { + return ordererList, err + } + + return ordererList, nil +} + +func (o *Orderer) CreateNodeCR(instance *current.IBPOrderer, number int) error { + if instance.Spec.NodeNumber != nil { + return fmt.Errorf("only parent orderer can create nodes custom resources, instance '%s' is not a parent", instance.GetName()) + } + + if !instance.Spec.License.Accept { + return errors.New("user must accept license before continuing") + } + + label := os.Getenv("OPERATOR_LABEL_PREFIX") + if label == "" { + label = "fabric" + } + + name := fmt.Sprintf("%snode%d", instance.GetName(), number) + node := instance.DeepCopy() + node.ObjectMeta = metav1.ObjectMeta{ + Name: name, + Namespace: instance.GetNamespace(), + Labels: map[string]string{ + "app": name, + "creator": label, + "parent": instance.GetName(), + "app.kubernetes.io/name": label, + "app.kubernetes.io/instance": label + "orderer", + "app.kubernetes.io/managed-by": label + "-operator", + }, + } + + log.Info(fmt.Sprintf("Cluster reconcile is precreating node '%s'", node.Name)) + + if len(node.Spec.ClusterConfigOverride) >= number { + node.Spec.ConfigOverride = node.Spec.ClusterConfigOverride[number-1] + } + + if len(node.Spec.ClusterSecret) >= number { + node.Spec.Secret = node.Spec.ClusterSecret[number-1] + } + + if len(node.Spec.ClusterLocation) >= number { + node.Spec.Zone = node.Spec.ClusterLocation[number-1].Zone + node.Spec.Region = node.Spec.ClusterLocation[number-1].Region + + if node.Spec.Zone != "" && node.Spec.Region == "" { + node.Spec.Region = "select" + } + } + + if instance.Spec.IsUsingChannelLess() { + node.Spec.UseChannelLess = instance.Spec.UseChannelLess + } else { + node.Spec.IsPrecreate = &current.BoolTrue + } + node.Spec.NodeNumber = &number + node.Spec.ClusterSize = 1 + node.Spec.ClusterSecret = nil + node.Spec.ClusterConfigOverride = nil + node.Spec.ClusterLocation = nil + + err := o.Client.Create(context.TODO(), node) + if err != nil { + return err + } + + if instance.Status.Version != version.Operator { + log.Info(fmt.Sprintf("[Create Node CR] Setting version to %s for node %s", version.Operator, node.Name)) + node.Status.Version = version.Operator + // Using Update instead of Patch status; an error will be thrown when trying to get and merge the instance during + // Patch. Update status will work here because the node has just been created, so its spec will not have been updated + // before setting its version. 
+ err = o.UpdateStatus(node) + if err != nil { + return err + } + } + + return nil +} + +func (o *Orderer) ReconcileNode(instance *current.IBPOrderer, update bool) (reconcile.Result, error) { + return reconcile.Result{}, errors.New("base orderer reconcile node not implemented, needs to be implemented by offering") +} + +func (o *Orderer) GenerateGenesisBlock(instance *current.IBPOrderer, addHostPortToProfile func(*configtx.Profile, *current.IBPOrderer) error) ([]byte, error) { + log.Info("Generating genesis block") + initProfile, err := o.LoadInitialProfile(instance) + if err != nil { + return nil, err + } + + err = addHostPortToProfile(initProfile, instance) + if err != nil { + return nil, err + } + + conf := initProfile.Orderer + mspConfigs := map[string]*msp.MSPConfig{} + for _, org := range conf.Organizations { + var err error + mspConfigs[org.Name], err = o.GetMSPConfig(instance, org.ID) + if err != nil { + return nil, errors.Wrap(err, "failed to create orderer org") + } + } + + genesisBlock, err := initProfile.GenerateBlock(instance.Spec.SystemChannelName, mspConfigs) + if err != nil { + return nil, err + } + + return genesisBlock, nil +} + +func (o *Orderer) LoadInitialProfile(instance *current.IBPOrderer) (*configtx.Profile, error) { + profile := instance.Spec.GenesisProfile + if profile == "" { + profile = "Initial" + } + + log.Info(fmt.Sprintf("Profile '%s' used for genesis creation", profile)) + + configTx := configtx.New() + initProfile, err := configTx.GetProfile(profile) + if err != nil { + return nil, err + } + + org := &configtx.Organization{ + Name: instance.Spec.OrgName, + ID: instance.Spec.MSPID, + MSPType: "bccsp", + MSPDir: "/certs/msp", + AdminPrincipal: "Role.MEMBER", + } + err = initProfile.AddOrgToOrderer(org) + if err != nil { + return nil, err + } + + return initProfile, nil +} + +func (o *Orderer) AddHostPortToProfile(initProfile *configtx.Profile, instance *current.IBPOrderer) error { + log.Info("Adding hosts to genesis block") + + nodes := o.GetNodes(instance) + for _, node := range nodes { + n := types.NamespacedName{ + Name: fmt.Sprintf("tls-%s%s-signcert", instance.Name, node.Name), + Namespace: instance.Namespace, + } + + // To avoid the race condition of the TLS signcert secret not existing, need to poll for it's + // existence before proceeding + tlsSecret := &corev1.Secret{} + err := wait.Poll(500*time.Millisecond, o.Config.Operator.Orderer.Timeouts.SecretPoll.Get(), func() (bool, error) { + err := o.Client.Get(context.TODO(), n, tlsSecret) + if err == nil { + return true, nil + } + return false, nil + }) + if err != nil { + return errors.Wrapf(err, "failed to find secret '%s'", n.Name) + } + + domain := instance.Spec.Domain + fqdn := instance.Namespace + "-" + instance.Name + node.Name + "-orderer" + "." 
+ domain + + log.Info(fmt.Sprintf("Adding consentor domain '%s' to genesis block", fqdn)) + + initProfile.AddOrdererAddress(fmt.Sprintf("%s:%d", fqdn, 443)) + consentors := &etcdraft.Consenter{ + Host: fqdn, + Port: 443, + ClientTlsCert: tlsSecret.Data["cert.pem"], + ServerTlsCert: tlsSecret.Data["cert.pem"], + } + err = initProfile.AddRaftConsentingNode(consentors) + if err != nil { + return err + } + } + return nil +} + +func (o *Orderer) GetMSPConfig(instance *current.IBPOrderer, ID string) (*msp.MSPConfig, error) { + isIntermediate := false + admincert := [][]byte{} + n := types.NamespacedName{ + Name: fmt.Sprintf("ecert-%s%s%d-admincerts", instance.Name, NODE, 1), + Namespace: instance.Namespace, + } + adminCert := &corev1.Secret{} + err := o.Client.Get(context.TODO(), n, adminCert) + if err != nil { + if !k8serrors.IsNotFound(err) { + return nil, err + } + } + for _, cert := range adminCert.Data { + admincert = append(admincert, cert) + } + + cacerts := [][]byte{} + n.Name = fmt.Sprintf("ecert-%s%s%d-cacerts", instance.Name, NODE, 1) + caCerts := &corev1.Secret{} + err = o.Client.Get(context.TODO(), n, caCerts) + if err != nil { + return nil, err + } + for _, cert := range caCerts.Data { + cacerts = append(cacerts, cert) + } + + intermediateCerts := [][]byte{} + interCerts := &corev1.Secret{} + n.Name = fmt.Sprintf("ecert-%s%s%d-intercerts", instance.Name, NODE, 1) + err = o.Client.Get(context.TODO(), n, interCerts) + if err != nil { + if !k8serrors.IsNotFound(err) { + return nil, err + } + } + for _, cert := range interCerts.Data { + isIntermediate = true + intermediateCerts = append(intermediateCerts, cert) + } + + cryptoConfig := &msp.FabricCryptoConfig{ + SignatureHashFamily: bccsp.SHA2, + IdentityIdentifierHashFunction: bccsp.SHA256, + } + + tlsCACerts := [][]byte{} + n.Name = fmt.Sprintf("tls-%s%s%d-cacerts", instance.Name, NODE, 1) + tlsCerts := &corev1.Secret{} + err = o.Client.Get(context.TODO(), n, tlsCerts) + if err != nil { + return nil, err + } + for _, cert := range tlsCerts.Data { + tlsCACerts = append(tlsCACerts, cert) + } + + tlsIntermediateCerts := [][]byte{} + tlsInterCerts := &corev1.Secret{} + n.Name = fmt.Sprintf("tls-%s%s%d-intercerts", instance.Name, NODE, 1) + err = o.Client.Get(context.TODO(), n, tlsInterCerts) + if err != nil { + if !k8serrors.IsNotFound(err) { + return nil, err + } + } + for _, cert := range tlsInterCerts.Data { + tlsIntermediateCerts = append(tlsIntermediateCerts, cert) + } + + fmspconf := &msp.FabricMSPConfig{ + Admins: admincert, + RootCerts: cacerts, + IntermediateCerts: intermediateCerts, + Name: ID, + CryptoConfig: cryptoConfig, + TlsRootCerts: tlsCACerts, + TlsIntermediateCerts: tlsIntermediateCerts, + FabricNodeOus: &msp.FabricNodeOUs{ + Enable: true, + ClientOuIdentifier: &msp.FabricOUIdentifier{ + OrganizationalUnitIdentifier: "client", + Certificate: cacerts[0], + }, + PeerOuIdentifier: &msp.FabricOUIdentifier{ + OrganizationalUnitIdentifier: "peer", + Certificate: cacerts[0], + }, + AdminOuIdentifier: &msp.FabricOUIdentifier{ + OrganizationalUnitIdentifier: "admin", + Certificate: cacerts[0], + }, + OrdererOuIdentifier: &msp.FabricOUIdentifier{ + OrganizationalUnitIdentifier: "orderer", + Certificate: cacerts[0], + }, + }, + } + + if isIntermediate { + fmspconf.FabricNodeOus.ClientOuIdentifier.Certificate = intermediateCerts[0] + fmspconf.FabricNodeOus.PeerOuIdentifier.Certificate = intermediateCerts[0] + fmspconf.FabricNodeOus.AdminOuIdentifier.Certificate = intermediateCerts[0] + 
fmspconf.FabricNodeOus.OrdererOuIdentifier.Certificate = intermediateCerts[0] + } + + fmpsjs, err := proto.Marshal(fmspconf) + if err != nil { + return nil, err + } + + mspconf := &msp.MSPConfig{Config: fmpsjs, Type: int32(fmsp.FABRIC)} + + return mspconf, nil +} + +func (o *Orderer) GetLabels(instance v1.Object) map[string]string { + label := os.Getenv("OPERATOR_LABEL_PREFIX") + if label == "" { + label = "fabric" + } + + orderernode := instance.(*current.IBPOrderer) + + name := instance.GetName() + + if orderernode.Spec.NodeNumber != nil { + nodename := fmt.Sprintf("%snode%d", name, *orderernode.Spec.NodeNumber) + name = nodename + } + + return map[string]string{ + "app": name, + "creator": label, + "parent": instance.GetName(), + "app.kubernetes.io/name": label, + "app.kubernetes.io/instance": label + "orderer", + "app.kubernetes.io/managed-by": label + "-operator", + } +} + +func (o *Orderer) ReadOUConfigFile(instance *current.IBPOrderer, configFile string) ([]*msp.FabricOUIdentifier, *msp.FabricNodeOUs, error) { + var ouis []*msp.FabricOUIdentifier + var nodeOUs *msp.FabricNodeOUs + // load the file, if there is a failure in loading it then + // return an error + raw, err := ioutil.ReadFile(filepath.Clean(configFile)) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed loading configuration file at [%s]", configFile) + } + + configuration := fmsp.Configuration{} + err = yaml.Unmarshal(raw, &configuration) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed unmarshalling configuration file at [%s]", configFile) + } + + n := types.NamespacedName{ + Name: fmt.Sprintf("ecert-%s%s%d-cacerts", instance.Name, NODE, 1), + Namespace: instance.Namespace, + } + caCerts := &corev1.Secret{} + err = o.Client.Get(context.TODO(), n, caCerts) + if err != nil { + return nil, nil, err + } + rawCert := caCerts.Data["cacert-0.pem"] + + // Prepare OrganizationalUnitIdentifiers + if len(configuration.OrganizationalUnitIdentifiers) > 0 { + for _, ouID := range configuration.OrganizationalUnitIdentifiers { + oui := &msp.FabricOUIdentifier{ + Certificate: rawCert, + OrganizationalUnitIdentifier: ouID.OrganizationalUnitIdentifier, + } + ouis = append(ouis, oui) + } + } + + // Prepare NodeOUs + if configuration.NodeOUs != nil && configuration.NodeOUs.Enable { + nodeOUs = &msp.FabricNodeOUs{ + Enable: true, + } + if configuration.NodeOUs.ClientOUIdentifier != nil && len(configuration.NodeOUs.ClientOUIdentifier.OrganizationalUnitIdentifier) != 0 { + nodeOUs.ClientOuIdentifier = &msp.FabricOUIdentifier{OrganizationalUnitIdentifier: configuration.NodeOUs.ClientOUIdentifier.OrganizationalUnitIdentifier} + } + if configuration.NodeOUs.PeerOUIdentifier != nil && len(configuration.NodeOUs.PeerOUIdentifier.OrganizationalUnitIdentifier) != 0 { + nodeOUs.PeerOuIdentifier = &msp.FabricOUIdentifier{OrganizationalUnitIdentifier: configuration.NodeOUs.PeerOUIdentifier.OrganizationalUnitIdentifier} + } + if configuration.NodeOUs.AdminOUIdentifier != nil && len(configuration.NodeOUs.AdminOUIdentifier.OrganizationalUnitIdentifier) != 0 { + nodeOUs.AdminOuIdentifier = &msp.FabricOUIdentifier{OrganizationalUnitIdentifier: configuration.NodeOUs.AdminOUIdentifier.OrganizationalUnitIdentifier} + } + if configuration.NodeOUs.OrdererOUIdentifier != nil && len(configuration.NodeOUs.OrdererOUIdentifier.OrganizationalUnitIdentifier) != 0 { + nodeOUs.OrdererOuIdentifier = &msp.FabricOUIdentifier{OrganizationalUnitIdentifier: configuration.NodeOUs.OrdererOUIdentifier.OrganizationalUnitIdentifier} + } + + // ClientOU + 
if nodeOUs.ClientOuIdentifier != nil { + nodeOUs.ClientOuIdentifier.Certificate = rawCert + } + // PeerOU + if nodeOUs.PeerOuIdentifier != nil { + nodeOUs.PeerOuIdentifier.Certificate = rawCert + } + // AdminOU + if nodeOUs.AdminOuIdentifier != nil { + nodeOUs.AdminOuIdentifier.Certificate = rawCert + } + // OrdererOU + if nodeOUs.OrdererOuIdentifier != nil { + nodeOUs.OrdererOuIdentifier.Certificate = rawCert + } + } + + return ouis, nodeOUs, nil +} + +func (o *Orderer) DeleteNode(instance *current.IBPOrderer, nodes int) error { + if nodes == 0 { + return errors.New("no cluster nodes left to delete") + } + + node := o.GetNode(nodes) + err := node.Delete(instance) + if err != nil { + return errors.Wrapf(err, "failed to delete node '%s'", node.Name) + } + + return nil +} + +func (o *Orderer) GetNodes(instance *current.IBPOrderer) []*Node { + size := instance.Spec.ClusterSize + nodes := []*Node{} + for i := 1; i <= size; i++ { + node := o.GetNode(i) + nodes = append(nodes, node) + } + return nodes +} + +func (o *Orderer) GetNode(nodeNumber int) *Node { + return o.NodeManager.GetNode(nodeNumber, o.RenewCertTimers, o.RestartManager) +} + +func (o *Orderer) CheckCSRHosts(instance *current.IBPOrderer, hosts []string) { + if instance.Spec.Secret != nil { + if instance.Spec.Secret.Enrollment != nil { + if instance.Spec.Secret.Enrollment.TLS == nil { + instance.Spec.Secret.Enrollment.TLS = &current.Enrollment{} + } + if instance.Spec.Secret.Enrollment.TLS.CSR == nil { + instance.Spec.Secret.Enrollment.TLS.CSR = &current.CSR{} + instance.Spec.Secret.Enrollment.TLS.CSR.Hosts = hosts + } else { + for _, host := range instance.Spec.Secret.Enrollment.TLS.CSR.Hosts { + hosts = util.AppendStringIfMissing(hosts, host) + } + instance.Spec.Secret.Enrollment.TLS.CSR.Hosts = hosts + } + } + } +} + +func GetDomainPort(address string) (string, string) { + u := strings.Split(address, ":") + return u[0], u[1] +} + +func (o *Orderer) PatchStatus(instance *current.IBPOrderer) error { + return o.Client.PatchStatus(context.TODO(), instance, nil, k8sclient.PatchOption{ + Resilient: &k8sclient.ResilientPatch{ + Retry: 3, + Into: &current.IBPOrderer{}, + Strategy: client.MergeFrom, + }, + }) +} + +func (o *Orderer) UpdateStatus(instance *current.IBPOrderer) error { + return o.Client.UpdateStatus(context.TODO(), instance) +} diff --git a/pkg/offering/base/orderer/orderer_suite_test.go b/pkg/offering/base/orderer/orderer_suite_test.go new file mode 100644 index 00000000..45430173 --- /dev/null +++ b/pkg/offering/base/orderer/orderer_suite_test.go @@ -0,0 +1,46 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package baseorderer_test + +import ( + "net" + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +func TestOrderer(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Orderer Suite") +} + +var ( + ln net.Listener +) + +var _ = BeforeSuite(func() { + var err error + ln, err = net.Listen("tcp", "0.0.0.0:2346") + Expect(err).NotTo(HaveOccurred()) +}) + +var _ = AfterSuite(func() { + ln.Close() +}) diff --git a/pkg/offering/base/orderer/orderer_test.go b/pkg/offering/base/orderer/orderer_test.go new file mode 100644 index 00000000..59eff4b2 --- /dev/null +++ b/pkg/offering/base/orderer/orderer_test.go @@ -0,0 +1,216 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package baseorderer_test + +import ( + "context" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + cmocks "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + ordererinit "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer" + managermocks "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/mocks" + baseorderer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer/mocks" + orderermocks "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer/mocks" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = Describe("Base Orderer", func() { + var ( + orderer *baseorderer.Orderer + instance *current.IBPOrderer + mockKubeClient *cmocks.Client + nodeManager *orderermocks.NodeManager + + ordererNodeMgr *managermocks.ResourceManager + update *mocks.Update + ) + + BeforeEach(func() { + mockKubeClient = &cmocks.Client{} + update = &mocks.Update{} + instance = ¤t.IBPOrderer{ + Spec: current.IBPOrdererSpec{ + ClusterSize: 1, + License: current.License{ + Accept: true, + }, + OrdererType: "etcdraft", + SystemChannelName: "testchainid", + OrgName: "orderermsp", + MSPID: "orderermsp", + ExternalAddress: "ibporderer:7050", + ImagePullSecrets: []string{"regcred"}, + }, + } + instance.Kind = "IBPOrderer" + + mockKubeClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj client.Object) error { + switch obj.(type) { + case *current.IBPOrderer: + o := obj.(*current.IBPOrderer) + o.Kind = "IBPOrderer" + instance = o + case *corev1.Service: + o := obj.(*corev1.Service) + o.Spec.Type = corev1.ServiceTypeNodePort + o.Spec.Ports = append(o.Spec.Ports, corev1.ServicePort{ + Name: "orderer-api", + TargetPort: intstr.IntOrString{ + IntVal: 7051, + }, + NodePort: int32(7051), + }) + } + return nil + } + + ordererNodeMgr = &managermocks.ResourceManager{} + + nodeManager = &orderermocks.NodeManager{} + orderer = &baseorderer.Orderer{ + Client: mockKubeClient, + Scheme: &runtime.Scheme{}, + Config: &config.Config{ + OrdererInitConfig: &ordererinit.Config{ + ConfigTxFile: "../../../../defaultconfig/orderer/configtx.yaml", + OUFile: "../../../../defaultconfig/orderer/ouconfig.yaml", + }, + }, + + NodeManager: nodeManager, + OrdererNodeManager: ordererNodeMgr, + } + }) + + Context("Reconciles", func() { + PIt("reconciles IBPOrderer", func() { + _, err := orderer.Reconcile(instance, update) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("check csr hosts", func() { + It("adds csr hosts if not present", func() { + instance = ¤t.IBPOrderer{ + Spec: current.IBPOrdererSpec{ + Secret: ¤t.SecretSpec{ + Enrollment: ¤t.EnrollmentSpec{}, + }, + }, + } + hosts := []string{"test.com", "127.0.0.1"} + orderer.CheckCSRHosts(instance, hosts) + Expect(instance.Spec.Secret.Enrollment.TLS).NotTo(BeNil()) + Expect(instance.Spec.Secret.Enrollment.TLS.CSR).NotTo(BeNil()) + Expect(instance.Spec.Secret.Enrollment.TLS.CSR.Hosts).To(Equal(hosts)) + }) + + It("appends csr hosts if passed", func() { + hostsCustom := []string{"custom.domain.com"} + hosts := []string{"test.com", "127.0.0.1"} + instance = ¤t.IBPOrderer{ + Spec: current.IBPOrdererSpec{ + Secret: ¤t.SecretSpec{ + Enrollment: ¤t.EnrollmentSpec{ + TLS: ¤t.Enrollment{ + CSR: ¤t.CSR{ + Hosts: hostsCustom, + }, + }, + }, + }, + }, + } + orderer.CheckCSRHosts(instance, hosts) + Expect(instance.Spec.Secret.Enrollment.TLS).NotTo(BeNil()) + Expect(instance.Spec.Secret.Enrollment.TLS.CSR).NotTo(BeNil()) + Expect(instance.Spec.Secret.Enrollment.TLS.CSR.Hosts).To(ContainElement(hostsCustom[0])) + Expect(instance.Spec.Secret.Enrollment.TLS.CSR.Hosts).To(ContainElement(hosts[0])) + Expect(instance.Spec.Secret.Enrollment.TLS.CSR.Hosts).To(ContainElement(hosts[1])) + }) + }) + + Context("images overrides", func() { + var images *current.OrdererImages + + Context("using registry url", func() { + BeforeEach(func() { + images = ¤t.OrdererImages{ + 
OrdererInitImage: "ordererinitimage", + OrdererInitTag: "2.0.0", + OrdererImage: "ordererimage", + OrdererTag: "2.0.0", + GRPCWebImage: "grpcimage", + GRPCWebTag: "2.0.0", + } + }) + + It("overrides images based with registry url and does not append more value on each call", func() { + images.Override(images, "ghcr.io/ibm-blockchain/", "amd64") + Expect(images.OrdererInitImage).To(Equal("ghcr.io/ibm-blockchain/ordererinitimage")) + Expect(images.OrdererInitTag).To(Equal("2.0.0")) + Expect(images.OrdererImage).To(Equal("ghcr.io/ibm-blockchain/ordererimage")) + Expect(images.OrdererTag).To(Equal("2.0.0")) + Expect(images.GRPCWebImage).To(Equal("ghcr.io/ibm-blockchain/grpcimage")) + Expect(images.GRPCWebTag).To(Equal("2.0.0")) + }) + + It("overrides images based with registry url and does not append more value on each call", func() { + images.Override(images, "ghcr.io/ibm-blockchain/images/", "s390") + Expect(images.OrdererInitImage).To(Equal("ghcr.io/ibm-blockchain/images/ordererinitimage")) + Expect(images.OrdererInitTag).To(Equal("2.0.0")) + Expect(images.OrdererImage).To(Equal("ghcr.io/ibm-blockchain/images/ordererimage")) + Expect(images.OrdererTag).To(Equal("2.0.0")) + Expect(images.GRPCWebImage).To(Equal("ghcr.io/ibm-blockchain/images/grpcimage")) + Expect(images.GRPCWebTag).To(Equal("2.0.0")) + }) + }) + + Context("using fully qualified path", func() { + BeforeEach(func() { + images = ¤t.OrdererImages{ + OrdererInitImage: "ghcr.io/ibm-blockchain/ordererinitimage", + OrdererInitTag: "2.0.0", + OrdererImage: "ghcr.io/ibm-blockchain/ordererimage", + OrdererTag: "2.0.0", + GRPCWebImage: "ghcr.io/ibm-blockchain/grpcimage", + GRPCWebTag: "2.0.0", + } + }) + + It("keeps images and adds arch to tag", func() { + images.Override(images, "", "amd64") + Expect(images.OrdererInitImage).To(Equal("ghcr.io/ibm-blockchain/ordererinitimage")) + Expect(images.OrdererInitTag).To(Equal("2.0.0")) + Expect(images.OrdererImage).To(Equal("ghcr.io/ibm-blockchain/ordererimage")) + Expect(images.OrdererTag).To(Equal("2.0.0")) + Expect(images.GRPCWebImage).To(Equal("ghcr.io/ibm-blockchain/grpcimage")) + Expect(images.GRPCWebTag).To(Equal("2.0.0")) + }) + }) + }) +}) diff --git a/pkg/offering/base/orderer/override/deployment.go b/pkg/offering/base/orderer/override/deployment.go new file mode 100644 index 00000000..266d1c57 --- /dev/null +++ b/pkg/offering/base/orderer/override/deployment.go @@ -0,0 +1,451 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package override + +import ( + "context" + "errors" + "fmt" + "path/filepath" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + commonapi "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/container" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/deployment" + dep "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/deployment" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/serviceaccount" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var log = logf.Log.WithName("orderer_deployment_override") + +type OrdererConfig interface { + MergeWith(interface{}, bool) error + ToBytes() ([]byte, error) + UsingPKCS11() bool + SetPKCS11Defaults(bool) + GetBCCSPSection() *commonapi.BCCSP + SetDefaultKeyStore() +} + +// Container names +const ( + INIT = "init" + ORDERER = "orderer" + PROXY = "proxy" + HSMCLIENT = "hsm-client" +) + +func (o *Override) Deployment(object v1.Object, deployment *appsv1.Deployment, action resources.Action) error { + instance := object.(*current.IBPOrderer) + switch action { + case resources.Create: + return o.CreateDeployment(instance, deployment) + case resources.Update: + return o.UpdateDeployment(instance, deployment) + } + + return nil +} + +func (o *Override) CreateDeployment(instance *current.IBPOrderer, k8sDep *appsv1.Deployment) error { + var err error + + if !instance.Spec.License.Accept { + return errors.New("user must accept license before continuing") + } + + ordererType := instance.Spec.OrdererType + if ordererType == "" { + return errors.New("Orderer Type not provided") + } + + systemChannelName := instance.Spec.SystemChannelName + if systemChannelName == "" { + return errors.New("System Channel Name not provided") + } + + ordererOrgName := instance.Spec.OrgName + if ordererOrgName == "" { + return errors.New("Orderer Org Name not provided") + } + + externalAddress := instance.Spec.ExternalAddress + if externalAddress == "" { + return errors.New("External Address not set") + } + + deployment := dep.New(k8sDep) + deployment.SetServiceAccountName(serviceaccount.GetName(instance.GetName())) + + orderer, err := deployment.GetContainer(ORDERER) + if err != nil { + return errors.New("orderer container not found in deployment spec") + } + grpcWeb, err := deployment.GetContainer(PROXY) + if err != nil { + return errors.New("proxy container not found in deployment spec") + } + _, err = deployment.GetContainer(INIT) + if err != nil { + return errors.New("init container not found in deployment spec") + } + + err = o.CommonDeploymentOverrides(instance, deployment) + if err != nil { + return err + } + + deployment.SetImagePullSecrets(instance.Spec.ImagePullSecrets) + + orderer.AppendConfigMapFromSourceIfMissing(instance.Name + "-env") + + claimName := instance.Name + "-pvc" + if instance.Spec.CustomNames.PVC.Orderer != "" { + claimName = instance.Spec.CustomNames.PVC.Orderer + } + deployment.AppendPVCVolumeIfMissing("orderer-data", claimName) + + 
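// EXTERNAL_ADDRESS is taken from instance.Spec.ExternalAddress, which was validated as non-empty above, and is surfaced to the grpcweb proxy container. +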
grpcWeb.AppendEnvIfMissing("EXTERNAL_ADDRESS", externalAddress) + + deployment.SetAffinity(o.GetAffinity(instance)) + + if o.AdminSecretExists(instance) { + deployment.AppendSecretVolumeIfMissing("ecert-admincerts", fmt.Sprintf("ecert-%s-admincerts", instance.Name)) + orderer.AppendVolumeMountIfMissing("ecert-admincerts", "/certs/msp/admincerts") + } + + deployment.AppendSecretVolumeIfMissing("ecert-cacerts", fmt.Sprintf("ecert-%s-cacerts", instance.Name)) + + co, err := instance.GetConfigOverride() + if err != nil { + return err + } + + configOverride := co.(OrdererConfig) + if !configOverride.UsingPKCS11() { + deployment.AppendSecretVolumeIfMissing("ecert-keystore", fmt.Sprintf("ecert-%s-keystore", instance.Name)) + orderer.AppendVolumeMountIfMissing("ecert-keystore", "/certs/msp/keystore") + } + + deployment.AppendSecretVolumeIfMissing("ecert-signcert", fmt.Sprintf("ecert-%s-signcert", instance.Name)) + + secretName := fmt.Sprintf("tls-%s-cacerts", instance.Name) + ecertintercertSecret := fmt.Sprintf("ecert-%s-intercerts", instance.Name) + tlsintercertSecret := fmt.Sprintf("tls-%s-intercerts", instance.Name) + // Check if intermediate ecerts exists + if util.IntermediateSecretExists(o.Client, instance.Namespace, ecertintercertSecret) { + // Mount intermediate ecert + orderer.AppendVolumeMountIfMissing("ecert-intercerts", "/certs/msp/intermediatecerts") + deployment.AppendSecretVolumeIfMissing("ecert-intercerts", ecertintercertSecret) + } + + // Check if intermediate tlscerts exists + if util.IntermediateSecretExists(o.Client, instance.Namespace, tlsintercertSecret) { + // Mount intermediate tls certs + orderer.AppendVolumeMountIfMissing("tls-intercerts", "/certs/msp/tlsintermediatecerts") + deployment.AppendSecretVolumeIfMissing("tls-intercerts", tlsintercertSecret) + } + + deployment.AppendSecretVolumeIfMissing("tls-cacerts", secretName) + deployment.AppendSecretVolumeIfMissing("tls-keystore", fmt.Sprintf("tls-%s-keystore", instance.Name)) + deployment.AppendSecretVolumeIfMissing("tls-signcert", fmt.Sprintf("tls-%s-signcert", instance.Name)) + deployment.AppendConfigMapVolumeIfMissing("orderer-config", instance.Name+"-config") + + if !instance.Spec.IsUsingChannelLess() { + deployment.AppendSecretVolumeIfMissing("orderer-genesis", fmt.Sprintf("%s-genesis", instance.Name)) + orderer.AppendVolumeMountIfMissing("orderer-genesis", "/certs/genesis") + } + + secret := &corev1.Secret{} + err = o.Client.Get( + context.TODO(), + types.NamespacedName{Name: instance.GetName() + "-secret", Namespace: instance.GetNamespace()}, + secret, + ) + if err == nil { + orderer.AppendEnvIfMissing("RESTART_OLD_RESOURCEVER", secret.ObjectMeta.ResourceVersion) + } + + deployment.UpdateContainer(orderer) + if instance.UsingHSMProxy() { + orderer.AppendEnvIfMissing("PKCS11_PROXY_SOCKET", instance.Spec.HSM.PKCS11Endpoint) + } else if instance.IsHSMEnabled() { + deployment.AppendVolumeIfMissing(corev1.Volume{ + Name: "shared", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{ + Medium: corev1.StorageMediumMemory, + }, + }, + }) + + orderer.AppendVolumeMountWithSubPathIfMissing("shared", "/hsm/lib", "hsm") + + hsmConfig, err := config.ReadHSMConfig(o.Client, instance) + if err != nil { + return err + } + + hsmSettings(instance, hsmConfig, orderer, deployment) + deployment.UpdateContainer(orderer) + } + + return nil +} + +func (o *Override) UpdateDeployment(instance *current.IBPOrderer, k8sDep *appsv1.Deployment) error { + deployment := dep.New(k8sDep) + err := 
o.CommonDeploymentOverrides(instance, deployment) + if err != nil { + return err + } + + if instance.UsingHSMProxy() { + orderer := deployment.MustGetContainer(ORDERER) + orderer.UpdateEnv("PKCS11_PROXY_SOCKET", instance.Spec.HSM.PKCS11Endpoint) + deployment.UpdateContainer(orderer) + } else if instance.IsHSMEnabled() { + hsmInitCont := deployment.MustGetContainer(HSMCLIENT) + image := instance.Spec.Images + if image != nil { + hsmInitCont.SetImage(image.HSMImage, image.HSMTag) + } + } + + return nil +} + +func (o *Override) CommonDeploymentOverrides(instance *current.IBPOrderer, deployment *dep.Deployment) error { + orderer := deployment.MustGetContainer(ORDERER) + grpcProxy := deployment.MustGetContainer(PROXY) + initCont := deployment.MustGetContainer(INIT) + + if instance.Spec.Replicas != nil { + if *instance.Spec.Replicas > 1 { + return errors.New("replicas > 1 not allowed in IBPOrderer") + } + deployment.SetReplicas(instance.Spec.Replicas) + } + + resourcesRequest := instance.Spec.Resources + if resourcesRequest != nil { + if resourcesRequest.Init != nil { + err := initCont.UpdateResources(resourcesRequest.Init) + if err != nil { + return err + } + } + if resourcesRequest.Orderer != nil { + err := orderer.UpdateResources(resourcesRequest.Orderer) + if err != nil { + return err + } + } + if resourcesRequest.GRPCProxy != nil { + err := grpcProxy.UpdateResources(resourcesRequest.GRPCProxy) + if err != nil { + return err + } + } + } + + image := instance.Spec.Images + if image != nil { + orderer.SetImage(image.OrdererImage, image.OrdererTag) + initCont.SetImage(image.OrdererInitImage, image.OrdererInitTag) + grpcProxy.SetImage(image.GRPCWebImage, image.GRPCWebTag) + } + + if o.Config != nil && o.Config.Operator.Orderer.DisableProbes == "true" { + log.Info("Env var IBPOPERATOR_ORDERER_DISABLEPROBES set to 'true', disabling orderer container probes") + orderer.SetLivenessProbe(nil) + orderer.SetReadinessProbe(nil) + orderer.SetStartupProbe(nil) + } + + deployment.UpdateContainer(orderer) + deployment.UpdateContainer(grpcProxy) + deployment.UpdateInitContainer(initCont) + + return nil +} + +func (o *Override) GetAffinity(instance *current.IBPOrderer) *corev1.Affinity { + arch := instance.Spec.Arch + zone := instance.Spec.Zone + region := instance.Spec.Region + nodeSelectorTerms := common.GetNodeSelectorTerms(arch, zone, region) + + orgName := instance.Spec.OrgName + podAntiAffinity := common.GetPodAntiAffinity(orgName) + + affinity := &corev1.Affinity{ + PodAntiAffinity: podAntiAffinity, + } + + if len(nodeSelectorTerms[0].MatchExpressions) != 0 { + affinity.NodeAffinity = &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: nodeSelectorTerms, + }, + } + } + + return affinity +} + +func (o *Override) AdminSecretExists(instance *current.IBPOrderer) bool { + secret := &corev1.Secret{} + err := o.Client.Get(context.TODO(), types.NamespacedName{ + Name: fmt.Sprintf("ecert-%s-admincerts", instance.Name), + Namespace: instance.Namespace}, secret) + if err != nil { + return false + } + + return true +} + +func hsmInitContainer(instance *current.IBPOrderer, hsmConfig *config.HSMConfig) *container.Container { + hsmLibraryPath := hsmConfig.Library.FilePath + hsmLibraryName := filepath.Base(hsmLibraryPath) + + f := false + user := int64(0) + mountPath := "/shared" + return &container.Container{ + Container: &corev1.Container{ + Name: "hsm-client", + Image: fmt.Sprintf("%s:%s", instance.Spec.Images.HSMImage, instance.Spec.Images.HSMTag), + 
ImagePullPolicy: corev1.PullAlways, + Command: []string{ + "sh", + "-c", + fmt.Sprintf("mkdir -p %s/hsm && dst=\"%s/hsm/%s\" && echo \"Copying %s to ${dst}\" && mkdir -p $(dirname $dst) && cp -r %s $dst", mountPath, mountPath, hsmLibraryName, hsmLibraryPath, hsmLibraryPath), + }, + SecurityContext: &corev1.SecurityContext{ + RunAsUser: &user, + RunAsNonRoot: &f, + }, + VolumeMounts: []corev1.VolumeMount{ + corev1.VolumeMount{ + Name: "shared", + MountPath: mountPath, + }, + }, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("0.1"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + corev1.ResourceEphemeralStorage: resource.MustParse("100Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("4Gi"), + corev1.ResourceEphemeralStorage: resource.MustParse("1Gi"), + }, + }, + }, + } +} + +func hsmSettings(instance *current.IBPOrderer, hsmConfig *config.HSMConfig, ordererCont container.Container, dep *deployment.Deployment) { + for _, v := range hsmConfig.GetVolumes() { + dep.AppendVolumeIfMissing(v) + } + + for _, vm := range hsmConfig.GetVolumeMounts() { + ordererCont.AppendVolumeMountStructIfMissing(vm) + } + + for _, env := range hsmConfig.GetEnvs() { + ordererCont.AppendEnvStructIfMissing(env) + } + + if hsmConfig.Library.Auth != nil { + dep.Spec.Template.Spec.ImagePullSecrets = util.AppendPullSecretIfMissing(dep.Spec.Template.Spec.ImagePullSecrets, hsmConfig.Library.Auth.ImagePullSecret) + } + + dep.AddInitContainer(*hsmInitContainer(instance, hsmConfig)) + + // If daemon settings are configured in the HSM config, create a sidecar that runs the daemon image + if hsmConfig.Daemon != nil { + hsmDaemonSettings(instance, hsmConfig, ordererCont, dep) + } +} + +func hsmDaemonSettings(instance *current.IBPOrderer, hsmConfig *config.HSMConfig, ordererCont container.Container, deployment *deployment.Deployment) { + // The daemon cannot be launched unless the container is running in privileged mode + t := true + ordererCont.SecurityContext.Privileged = &t + ordererCont.SecurityContext.AllowPrivilegeEscalation = &t + + // Update command in deployment to ensure that the daemon is running before starting the orderer + ordererCont.Command = []string{ + "sh", + "-c", + fmt.Sprintf("%s && orderer", config.DAEMON_CHECK_CMD), + } + + // This is the shared volume where the file 'pkcsslotd-luanched' is touched to let + // other containers know that the daemon has successfully launched. + ordererCont.AppendVolumeMountIfMissing("shared", "/shared") + + pvcVolumeName := "orderer-data" + // Certain token information needs to be stored in a persistent store; the administrator + // responsible for configuring the HSM sets the HSM config to point to the path where the PVC + // needs to be mounted.
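+ // For illustration, an HSM config entry that triggers the PVC handling below might look roughly like
+ // this (only fields already referenced in this file and in the deployment tests are used; the name and
+ // path values are hypothetical):
+ //
+ //   hsmConfig.MountPaths = []config.MountPath{
+ //       {Name: "tokens", MountPath: "/hsm/tokens", UsePVC: true},
+ //   }
+ //
+ // Any entry with UsePVC set results in the orderer data PVC being mounted at that path on the orderer
+ // container and handed to the daemon container added further down.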
+ var pvcMount *corev1.VolumeMount + for _, vm := range hsmConfig.MountPaths { + if vm.UsePVC { + pvcMount = &corev1.VolumeMount{ + Name: pvcVolumeName, + MountPath: vm.MountPath, + } + } + } + + // If a pull secret is required to pull the daemon image, update the deployment's image pull secrets + if hsmConfig.Daemon.Auth != nil { + deployment.Spec.Template.Spec.ImagePullSecrets = util.AppendPullSecretIfMissing( + deployment.Spec.Template.Spec.ImagePullSecrets, + hsmConfig.Daemon.Auth.ImagePullSecret, + ) + } + + // Add daemon container to the deployment + config.AddDaemonContainer(hsmConfig, deployment, instance.GetResource(current.HSMDAEMON), pvcMount) + + // If a pvc mount has been configured in the HSM config, set the volume mount on the orderer container + // and add the PVC volume to the deployment if missing + if pvcMount != nil { + ordererCont.AppendVolumeMountStructIfMissing(*pvcMount) + deployment.AppendPVCVolumeIfMissing(pvcVolumeName, instance.PVCName()) + } +} diff --git a/pkg/offering/base/orderer/override/deployment_test.go b/pkg/offering/base/orderer/override/deployment_test.go new file mode 100644 index 00000000..bb3b5c9d --- /dev/null +++ b/pkg/offering/base/orderer/override/deployment_test.go @@ -0,0 +1,593 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + "context" + "encoding/json" + "errors" + "fmt" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "gopkg.in/yaml.v2" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + v2orderer "github.com/IBM-Blockchain/fabric-operator/pkg/apis/orderer/v2" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + v2ordererconfig "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer/config/v2" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + dep "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/deployment" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" +) + +var _ = Describe("Base Orderer Deployment Overrides", func() { + var ( + overrider *override.Override + instance *current.IBPOrderer + deployment *appsv1.Deployment + mockKubeClient *mocks.Client + ) + + BeforeEach(func() { + var err error + + deployment, err = util.GetDeploymentFromFile("../../../../../definitions/orderer/deployment.yaml") + Expect(err).NotTo(HaveOccurred()) + + mockKubeClient = &mocks.Client{} + mockKubeClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj client.Object) error { + switch obj.(type) { + case *corev1.ConfigMap: + hsmConfig := &config.HSMConfig{ + Type: "hsm", + Version: "v1", + MountPaths: []config.MountPath{ + config.MountPath{ + Name: "hsmcrypto", + Secret: "hsmcrypto", + MountPath: "/hsm", + Paths: []config.Path{ + { + Key: "cafile.pem", + Path: "cafile.pem", + }, + { + Key: "cert.pem", + Path: "cert.pem", + }, + { + Key: "key.pem", + Path: "key.pem", + }, + { + Key: "server.pem", + Path: "server.pem", + }, + }, + }, + config.MountPath{ + Name: "hsmconfig", + Secret: "hsmcrypto", + MountPath: "/etc/Chrystoki.conf", + SubPath: "Chrystoki.conf", + Paths: []config.Path{ + { + Key: "Chrystoki.conf", + Path: "Chrystoki.conf", + }, + }, + }, + }, + Envs: []corev1.EnvVar{ + { + Name: "env1", + Value: "env1value", + }, + }, + } + + configBytes, err := yaml.Marshal(hsmConfig) + if err != nil { + return err + } + o := obj.(*corev1.ConfigMap) + o.Data = map[string]string{"ibp-hsm-config.yaml": string(configBytes)} + } + return nil + } + + overrider = &override.Override{ + Client: mockKubeClient, + } + + replicas := int32(1) + instance = ¤t.IBPOrderer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ordereroverride", + Namespace: "namespace1", + }, + Spec: current.IBPOrdererSpec{ + License: current.License{ + Accept: true, + }, + OrgName: "orderermsp", + MSPID: "orderermsp", + OrdererType: "solo", + ExternalAddress: "0.0.0.0", + GenesisProfile: "Initial", + Storage: ¤t.OrdererStorages{}, + Service: ¤t.Service{}, + Images: ¤t.OrdererImages{ + OrdererInitImage: "fake-init-image", + OrdererInitTag: "1234", + OrdererImage: "fake-orderer-image", + OrdererTag: "1234", + GRPCWebImage: "fake-grpcweb-image", + GRPCWebTag: "1234", + }, + SystemChannelName: "testchainid", + Arch: []string{"test-arch"}, + Zone: "dal", + Region: "us-south", + ImagePullSecrets: []string{"pullsecret1"}, + Replicas: &replicas, + Resources: ¤t.OrdererResources{ + Orderer: 
&corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("0.6m"), + corev1.ResourceMemory: resource.MustParse("0.4m"), + corev1.ResourceEphemeralStorage: resource.MustParse("0.1m"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("0.7m"), + corev1.ResourceMemory: resource.MustParse("0.5m"), + corev1.ResourceEphemeralStorage: resource.MustParse("0.5m"), + }, + }, + GRPCProxy: &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("0.1m"), + corev1.ResourceMemory: resource.MustParse("0.2m"), + corev1.ResourceEphemeralStorage: resource.MustParse("0.1m"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("0.3m"), + corev1.ResourceMemory: resource.MustParse("0.4m"), + corev1.ResourceEphemeralStorage: resource.MustParse("0.5m"), + }, + }, + }, + }, + } + }) + + Context("create", func() { + It("returns an error if license is not accepted", func() { + instance.Spec.License.Accept = false + err := overrider.Deployment(instance, deployment, resources.Create) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("user must accept license before continuing")) + }) + + It("returns an error if value for Orderer Type not provided", func() { + instance.Spec.OrdererType = "" + err := overrider.Deployment(instance, deployment, resources.Create) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("Orderer Type not provided")) + }) + + It("returns an error if value for System Channel Name not provided", func() { + instance.Spec.SystemChannelName = "" + err := overrider.Deployment(instance, deployment, resources.Create) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("System Channel Name not provided")) + }) + + It("returns an error if value for Org Name not provided", func() { + instance.Spec.OrgName = "" + err := overrider.Deployment(instance, deployment, resources.Create) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("Orderer Org Name not provided")) + }) + + It("returns an error if value for External Address not provided", func() { + instance.Spec.ExternalAddress = "" + err := overrider.Deployment(instance, deployment, resources.Create) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("External Address not set")) + }) + + It("overrides values based on spec", func() { + mockKubeClient.GetReturnsOnCall(1, errors.New("no inter cert found")) + err := overrider.Deployment(instance, deployment, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + By("setting pull secret", func() { + Expect(deployment.Spec.Template.Spec.ImagePullSecrets).To(Equal([]corev1.LocalObjectReference{corev1.LocalObjectReference{ + Name: instance.Spec.ImagePullSecrets[0], + }})) + }) + + By("setting env from", func() { + envFrom := corev1.EnvFromSource{ + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: instance.Name + "-env", + }, + }, + } + Expect(deployment.Spec.Template.Spec.Containers[0].EnvFrom).To(ContainElement(envFrom)) + }) + + By("setting orderer-data volume", func() { + volume := corev1.Volume{ + Name: "orderer-data", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: instance.Name + "-pvc", + }, + }, + } + Expect(deployment.Spec.Template.Spec.Volumes).To(ContainElement(volume)) + }) + + By("setting EXTERNAL_ADDRESS env var on grpcweb container", func() { + ev := corev1.EnvVar{ + 
Name: "EXTERNAL_ADDRESS", + Value: instance.Spec.ExternalAddress, + } + Expect(deployment.Spec.Template.Spec.Containers[1].Env).To(ContainElement(ev)) + }) + + By("setting ecert admincerts volume and volume mount", func() { + v := corev1.Volume{ + Name: "ecert-admincerts", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: fmt.Sprintf("ecert-%s-admincerts", instance.Name), + }, + }, + } + Expect(deployment.Spec.Template.Spec.Volumes).To(ContainElement(v)) + + vm := corev1.VolumeMount{ + Name: "ecert-admincerts", + MountPath: "/certs/msp/admincerts", + } + Expect(deployment.Spec.Template.Spec.Containers[0].VolumeMounts).To(ContainElement(vm)) + }) + + By("setting ecert cacerts volume", func() { + v := corev1.Volume{ + Name: "ecert-cacerts", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: fmt.Sprintf("ecert-%s-cacerts", instance.Name), + }, + }, + } + Expect(deployment.Spec.Template.Spec.Volumes).To(ContainElement(v)) + }) + + By("setting ecert keystore volume", func() { + v := corev1.Volume{ + Name: "ecert-keystore", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: fmt.Sprintf("ecert-%s-keystore", instance.Name), + }, + }, + } + Expect(deployment.Spec.Template.Spec.Volumes).To(ContainElement(v)) + }) + + By("setting ecert signcert volume", func() { + v := corev1.Volume{ + Name: "ecert-signcert", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: fmt.Sprintf("ecert-%s-signcert", instance.Name), + }, + }, + } + Expect(deployment.Spec.Template.Spec.Volumes).To(ContainElement(v)) + }) + + By("setting tls cacerts volume", func() { + v := corev1.Volume{ + Name: "tls-cacerts", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: fmt.Sprintf("tls-%s-cacerts", instance.Name), + }, + }, + } + Expect(deployment.Spec.Template.Spec.Volumes).To(ContainElement(v)) + }) + + By("setting tls keystore volume", func() { + v := corev1.Volume{ + Name: "tls-keystore", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: fmt.Sprintf("tls-%s-keystore", instance.Name), + }, + }, + } + Expect(deployment.Spec.Template.Spec.Volumes).To(ContainElement(v)) + }) + + By("setting tls signcert volume", func() { + v := corev1.Volume{ + Name: "tls-signcert", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: fmt.Sprintf("tls-%s-signcert", instance.Name), + }, + }, + } + Expect(deployment.Spec.Template.Spec.Volumes).To(ContainElement(v)) + }) + + By("setting orderer-genesis volume", func() { + v := corev1.Volume{ + Name: "orderer-genesis", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: fmt.Sprintf("%s-genesis", instance.Name), + }, + }, + } + Expect(deployment.Spec.Template.Spec.Volumes).To(ContainElement(v)) + }) + + By("setting orderer-config volume", func() { + v := corev1.Volume{ + Name: "orderer-config", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: instance.Name + "-config", + }, + }, + }, + } + Expect(deployment.Spec.Template.Spec.Volumes).To(ContainElement(v)) + }) + + By("setting affinity", func() { + expectedAffinity := overrider.GetAffinity(instance) + Expect(deployment.Spec.Template.Spec.Affinity).To(Equal(expectedAffinity)) + }) + + OrdererDeploymentCommonOverrides(instance, deployment) + }) + + It("overrides values 
based on whether disableProbes is set to true", func() { + overrider.Config = &operatorconfig.Config{ + Operator: operatorconfig.Operator{ + Orderer: operatorconfig.Orderer{ + DisableProbes: "true", + }, + }, + } + mockKubeClient.GetReturnsOnCall(1, errors.New("no inter cert found")) + err := overrider.Deployment(instance, deployment, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + By("setting probe values to nil when IBPOPERATOR_ORDERER_DISABLE_PROBES is set to true", func() { + d := dep.New(deployment) + Expect(d.MustGetContainer(override.ORDERER).ReadinessProbe).To(BeNil()) + Expect(d.MustGetContainer(override.ORDERER).LivenessProbe).To(BeNil()) + Expect(d.MustGetContainer(override.ORDERER).StartupProbe).To(BeNil()) + }) + + }) + }) + + Context("update", func() { + It("overrides values based on spec", func() { + err := overrider.Deployment(instance, deployment, resources.Update) + Expect(err).NotTo(HaveOccurred()) + + OrdererDeploymentCommonOverrides(instance, deployment) + }) + }) + + Context("Replicas", func() { + When("Replicas is greater than 1", func() { + It("returns an error", func() { + replicas := int32(2) + instance.Spec.Replicas = &replicas + err := overrider.Deployment(instance, deployment, resources.Create) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("replicas > 1 not allowed in IBPOrderer")) + }) + }) + When("Replicas is equal to 1", func() { + It("returns success", func() { + replicas := int32(1) + instance.Spec.Replicas = &replicas + err := overrider.Deployment(instance, deployment, resources.Create) + Expect(err).NotTo(HaveOccurred()) + }) + }) + When("Replicas is equal to 0", func() { + It("returns success", func() { + replicas := int32(0) + instance.Spec.Replicas = &replicas + err := overrider.Deployment(instance, deployment, resources.Create) + Expect(err).NotTo(HaveOccurred()) + }) + }) + When("Replicas is nil", func() { + It("returns success", func() { + instance.Spec.Replicas = nil + err := overrider.Deployment(instance, deployment, resources.Create) + Expect(err).NotTo(HaveOccurred()) + }) + }) + }) + + Context("images", func() { + var image *current.OrdererImages + + BeforeEach(func() { + image = ¤t.OrdererImages{ + OrdererInitImage: "init-image", + OrdererImage: "orderer-image", + GRPCWebImage: "grpcweb-image", + } + instance.Spec.Images = image + }) + + When("no tag is passed", func() { + It("uses 'latest' for image tags", func() { + err := overrider.Deployment(instance, deployment, resources.Create) + Expect(err).NotTo(HaveOccurred()) + Expect(deployment.Spec.Template.Spec.InitContainers[0].Image).To(Equal("init-image:latest")) + Expect(deployment.Spec.Template.Spec.Containers[0].Image).To(Equal("orderer-image:latest")) + Expect(deployment.Spec.Template.Spec.Containers[1].Image).To(Equal("grpcweb-image:latest")) + }) + }) + + When("tag is passed", func() { + It("uses the passed in tag for image tags", func() { + image.OrdererInitTag = "1.0.0" + image.OrdererTag = "2.0.0" + image.GRPCWebTag = "3.0.0" + + err := overrider.Deployment(instance, deployment, resources.Create) + Expect(err).NotTo(HaveOccurred()) + Expect(deployment.Spec.Template.Spec.InitContainers[0].Image).To(Equal("init-image:1.0.0")) + Expect(deployment.Spec.Template.Spec.Containers[0].Image).To(Equal("orderer-image:2.0.0")) + Expect(deployment.Spec.Template.Spec.Containers[1].Image).To(Equal("grpcweb-image:3.0.0")) + }) + }) + }) + + Context("HSM", func() { + BeforeEach(func() { + configOverride := v2ordererconfig.Orderer{ + Orderer: v2orderer.Orderer{ + General: 
v2orderer.General{ + BCCSP: &common.BCCSP{ + ProviderName: "PKCS11", + PKCS11: &common.PKCS11Opts{ + Label: "partition1", + Pin: "B6T9Q7mGNG", + }, + }, + }, + }, + } + + configBytes, err := json.Marshal(configOverride) + Expect(err).NotTo(HaveOccurred()) + configRaw := json.RawMessage(configBytes) + + instance.Spec.ConfigOverride = &runtime.RawExtension{Raw: configRaw} + }) + + It("sets proxy env on orderer container", func() { + instance.Spec.HSM = ¤t.HSM{PKCS11Endpoint: "1.2.3.4"} + err := overrider.Deployment(instance, deployment, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + d := dep.New(deployment) + Expect(d.MustGetContainer(override.ORDERER).Env).To(ContainElement(corev1.EnvVar{ + Name: "PKCS11_PROXY_SOCKET", + Value: "1.2.3.4", + })) + }) + + It("configures deployment to use HSM init image", func() { + err := overrider.Deployment(instance, deployment, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + d := dep.New(deployment) + By("setting volume mounts", func() { + Expect(d.MustGetContainer(override.ORDERER).VolumeMounts).To(ContainElement(corev1.VolumeMount{ + Name: "shared", + MountPath: "/hsm/lib", + SubPath: "hsm", + })) + + Expect(d.MustGetContainer(override.ORDERER).VolumeMounts).To(ContainElement(corev1.VolumeMount{ + Name: "hsmconfig", + MountPath: "/etc/Chrystoki.conf", + SubPath: "Chrystoki.conf", + })) + }) + + By("setting env vars", func() { + Expect(d.MustGetContainer(override.ORDERER).Env).To(ContainElement(corev1.EnvVar{ + Name: "env1", + Value: "env1value", + })) + }) + + By("creating HSM init container", func() { + Expect(d.ContainerExists("hsm-client")).To(Equal(true)) + }) + }) + }) +}) + +func OrdererDeploymentCommonOverrides(instance *current.IBPOrderer, dep *appsv1.Deployment) { + By("setting orderer resources", func() { + r, err := util.GetResourcePatch(&corev1.ResourceRequirements{}, instance.Spec.Resources.Orderer) + Expect(err).NotTo(HaveOccurred()) + Expect(dep.Spec.Template.Spec.Containers[0].Resources).To(Equal(*r)) + }) + + By("setting grpcweb resources", func() { + r, err := util.GetResourcePatch(&corev1.ResourceRequirements{}, instance.Spec.Resources.GRPCProxy) + Expect(err).NotTo(HaveOccurred()) + Expect(dep.Spec.Template.Spec.Containers[1].Resources).To(Equal(*r)) + }) + + By("setting init image", func() { + Expect(dep.Spec.Template.Spec.InitContainers[0].Image).To(Equal(fmt.Sprintf("%s:%s", instance.Spec.Images.OrdererInitImage, instance.Spec.Images.OrdererInitTag))) + }) + + By("setting orderer image", func() { + Expect(dep.Spec.Template.Spec.Containers[0].Image).To(Equal(fmt.Sprintf("%s:%s", instance.Spec.Images.OrdererImage, instance.Spec.Images.OrdererTag))) + }) + + By("setting grpcweb image", func() { + Expect(dep.Spec.Template.Spec.Containers[1].Image).To(Equal(fmt.Sprintf("%s:%s", instance.Spec.Images.GRPCWebImage, instance.Spec.Images.GRPCWebTag))) + }) + + By("setting replicas", func() { + Expect(dep.Spec.Replicas).To(Equal(instance.Spec.Replicas)) + }) +} diff --git a/pkg/offering/base/orderer/override/envcm.go b/pkg/offering/base/orderer/override/envcm.go new file mode 100644 index 00000000..e60a1e0f --- /dev/null +++ b/pkg/offering/base/orderer/override/envcm.go @@ -0,0 +1,107 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override + +import ( + "errors" + "fmt" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + "github.com/IBM-Blockchain/fabric-operator/version" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (o *Override) EnvCM(object v1.Object, cm *corev1.ConfigMap, action resources.Action, options map[string]interface{}) error { + instance := object.(*current.IBPOrderer) + switch action { + case resources.Create: + return o.CreateEnvCM(instance, cm) + case resources.Update: + return o.UpdateEnvCM(instance, cm) + } + + return nil +} + +func (o *Override) CreateEnvCM(instance *current.IBPOrderer, cm *corev1.ConfigMap) error { + genesisProfile := instance.Spec.GenesisProfile + if genesisProfile == "" { + genesisProfile = "Initial" + } + cm.Data["ORDERER_GENERAL_GENESISPROFILE"] = genesisProfile + + mspID := instance.Spec.MSPID + if mspID == "" { + return errors.New("failed to provide MSP ID for orderer") + } + cm.Data["ORDERER_GENERAL_LOCALMSPID"] = mspID + + if version.GetMajorReleaseVersion(instance.Spec.FabricVersion) == version.V2 { + if instance.Spec.IsUsingChannelLess() { + cm.Data["ORDERER_GENERAL_BOOTSTRAPMETHOD"] = "none" + } else { + cm.Data["ORDERER_GENERAL_BOOTSTRAPMETHOD"] = "file" + cm.Data["ORDERER_GENERAL_BOOTSTRAPFILE"] = "/certs/genesis/orderer.block" + } + } else { + cm.Data["ORDERER_GENERAL_GENESISMETHOD"] = "file" + cm.Data["ORDERER_GENERAL_GENESISFILE"] = "/certs/genesis/orderer.block" + } + + intermediateExists := util.IntermediateSecretExists(o.Client, instance.Namespace, fmt.Sprintf("ecert-%s-intercerts", instance.Name)) && + util.IntermediateSecretExists(o.Client, instance.Namespace, fmt.Sprintf("tls-%s-intercerts", instance.Name)) + intercertPath := "/certs/msp/tlsintermediatecerts/intercert-0.pem" + if intermediateExists { + cm.Data["ORDERER_GENERAL_TLS_ROOTCAS"] = intercertPath + cm.Data["ORDERER_OPERATIONS_TLS_ROOTCAS"] = intercertPath + cm.Data["ORDERER_OPERATIONS_TLS_CLIENTROOTCAS"] = intercertPath + cm.Data["ORDERER_GENERAL_CLUSTER_ROOTCAS"] = intercertPath + } + // Add configs for 2.4.x + // Add default cert location for admin service + currentVer := version.String(instance.Spec.FabricVersion) + if currentVer.EqualWithoutTag(version.V2_4_1) || currentVer.GreaterThan(version.V2_4_1) { + // Enable Channel participation for 2.4.x orderers + cm.Data["ORDERER_CHANNELPARTICIPATION_ENABLED"] = "true" + + cm.Data["ORDERER_ADMIN_TLS_ENABLED"] = "true" + cm.Data["ORDERER_ADMIN_TLS_CERTIFICATE"] = "/certs/tls/signcerts/cert.pem" + cm.Data["ORDERER_ADMIN_TLS_PRIVATEKEY"] = "/certs/tls/keystore/key.pem" + cm.Data["ORDERER_ADMIN_TLS_CLIENTAUTHREQUIRED"] = "true" + // override the default value 127.0.0.1:9443 + cm.Data["ORDERER_ADMIN_LISTENADDRESS"] = "0.0.0.0:9443" + if intermediateExists { + // override intermediate cert paths for root and clientroot cas + cm.Data["ORDERER_ADMIN_TLS_ROOTCAS"] = intercertPath + cm.Data["ORDERER_ADMIN_TLS_CLIENTROOTCAS"] = intercertPath + } else 
{ + cm.Data["ORDERER_ADMIN_TLS_ROOTCAS"] = "/certs/msp/tlscacerts/cacert-0.pem" + cm.Data["ORDERER_ADMIN_TLS_CLIENTROOTCAS"] = "/certs/msp/tlscacerts/cacert-0.pem" + } + } + + return nil +} + +func (o *Override) UpdateEnvCM(instance *current.IBPOrderer, cm *corev1.ConfigMap) error { + return nil +} diff --git a/pkg/offering/base/orderer/override/orderernode.go b/pkg/offering/base/orderer/override/orderernode.go new file mode 100644 index 00000000..bf720d7c --- /dev/null +++ b/pkg/offering/base/orderer/override/orderernode.go @@ -0,0 +1,70 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override + +import ( + "errors" + "strconv" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (o *Override) OrdererNode(object v1.Object, orderernode *current.IBPOrderer, action resources.Action) error { + instance := object.(*current.IBPOrderer) + switch action { + case resources.Create: + return o.CreateOrderernode(instance, orderernode) + case resources.Update: + return o.UpdateOrderernode(instance, orderernode) + } + + return nil +} + +func (o *Override) CreateOrderernode(instance *current.IBPOrderer, orderernode *current.IBPOrderer) error { + if instance.Spec.ClusterLocation != nil && instance.Spec.ClusterSize != 0 { + offset := *instance.Spec.NodeNumber - 1 + instance.Spec.Region = instance.Spec.ClusterLocation[offset].Region + instance.Spec.Zone = instance.Spec.ClusterLocation[offset].Zone + + if instance.Spec.Zone != "" && instance.Spec.Region == "" { + instance.Spec.Region = "select" + } + } + + if !instance.Spec.License.Accept { + return errors.New("user must accept license before continuing") + } + + name := instance.GetName() + instance.DeepCopyInto(orderernode) + orderernode.Name = name + "node" + strconv.Itoa(*instance.Spec.NodeNumber) + orderernode.ResourceVersion = "" + orderernode.Labels = map[string]string{ + "parent": name, + } + + return nil +} + +func (o *Override) UpdateOrderernode(instance *current.IBPOrderer, deployment *current.IBPOrderer) error { + return nil +} diff --git a/pkg/offering/base/orderer/override/override.go b/pkg/offering/base/orderer/override/override.go new file mode 100644 index 00000000..de57bff7 --- /dev/null +++ b/pkg/offering/base/orderer/override/override.go @@ -0,0 +1,30 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override + +import ( + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" +) + +type Override struct { + Name string + Client controllerclient.Client + Config *config.Config +} diff --git a/pkg/offering/base/orderer/override/override_suite_test.go b/pkg/offering/base/orderer/override/override_suite_test.go new file mode 100644 index 00000000..fa47c9b8 --- /dev/null +++ b/pkg/offering/base/orderer/override/override_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestOverride(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Override Suite") +} diff --git a/pkg/offering/base/orderer/override/override_test.go b/pkg/offering/base/orderer/override/override_test.go new file mode 100644 index 00000000..2a913066 --- /dev/null +++ b/pkg/offering/base/orderer/override/override_test.go @@ -0,0 +1,154 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer/override" +) + +var _ = Describe("K8S Orderer Overrides", func() { + var ( + overrider *override.Override + instance *current.IBPOrderer + ) + + BeforeEach(func() { + overrider = &override.Override{ + Client: &mocks.Client{}, + } + instance = ¤t.IBPOrderer{} + }) + + Context("Affnity", func() { + BeforeEach(func() { + instance = ¤t.IBPOrderer{ + Spec: current.IBPOrdererSpec{ + OrgName: "orderermsp", + Arch: []string{"test-arch"}, + Zone: "dal", + Region: "us-south", + }, + } + }) + + It("returns an proper affinity when arch is passed", func() { + a := overrider.GetAffinity(instance) + Expect(a.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0].Values).To(Equal([]string{"test-arch"})) + Expect(a.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0].PodAffinityTerm.LabelSelector.MatchExpressions[0].Key).To(Equal("orgname")) + Expect(a.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0].PodAffinityTerm.LabelSelector.MatchExpressions[0].Values).To(Equal([]string{"orderermsp"})) + Expect(a.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0].Weight).To(Equal(int32(100))) + }) + + It("returns an proper affinity when no arch is passed", func() { + instance.Spec.Arch = []string{} + a := overrider.GetAffinity(instance) + Expect(a.NodeAffinity).NotTo(BeNil()) + Expect(a.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0].Values).To(Equal([]string{"dal"})) + Expect(a.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[1].Values).To(Equal([]string{"us-south"})) + Expect(a.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0].PodAffinityTerm.LabelSelector.MatchExpressions[0].Key).To(Equal("orgname")) + Expect(a.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0].PodAffinityTerm.LabelSelector.MatchExpressions[0].Values).To(Equal([]string{"orderermsp"})) + Expect(a.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0].Weight).To(Equal(int32(100))) + }) + }) + + Context("Deployment", func() { + var ( + orderernode *current.IBPOrderer + ) + + nodenum := 2 + + BeforeEach(func() { + instance = ¤t.IBPOrderer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "os1", + }, + Spec: current.IBPOrdererSpec{ + License: current.License{ + Accept: true, + }, + OrgName: "orderermsp", + MSPID: "orderermsp", + OrdererType: "solo", + ExternalAddress: "0.0.0.0", + GenesisProfile: "Initial", + Storage: ¤t.OrdererStorages{}, + Service: ¤t.Service{}, + Images: ¤t.OrdererImages{}, + Resources: ¤t.OrdererResources{}, + SystemChannelName: "testchainid", + Arch: []string{"test-arch"}, + Zone: "dal", + Region: "us-south", + ClusterSize: 2, + NodeNumber: &nodenum, + ClusterLocation: []current.IBPOrdererClusterLocation{ + current.IBPOrdererClusterLocation{ + Zone: "dal1", + Region: "us-south1", + }, + current.IBPOrdererClusterLocation{ + Zone: "dal2", + Region: "us-south2", + }, + }, + }, + } + + orderernode = ¤t.IBPOrderer{ + Spec: current.IBPOrdererSpec{ + License: current.License{ + Accept: true, + }, + OrgName: "orderermsp", + MSPID: "orderermsp", + OrdererType: "solo", + 
ExternalAddress: "0.0.0.0", + GenesisProfile: "Initial", + Storage: ¤t.OrdererStorages{}, + Service: ¤t.Service{}, + Images: ¤t.OrdererImages{}, + Resources: ¤t.OrdererResources{}, + SystemChannelName: "testchainid", + Arch: []string{"test-arch"}, + Zone: "dal", + Region: "us-south", + }, + } + }) + + Context("Create overrides", func() { + It("overides things correctly", func() { + err := overrider.OrdererNode(instance, orderernode, resources.Create) + Expect(err).NotTo(HaveOccurred()) + Expect(orderernode.Spec.Zone).To(Equal(instance.Spec.ClusterLocation[1].Zone)) + Expect(orderernode.Spec.Region).To(Equal(instance.Spec.ClusterLocation[1].Region)) + Expect(orderernode.GetName()).To(Equal(instance.GetName() + "node2")) + Expect(orderernode.Labels["parent"]).To(Equal(instance.Name)) + }) + }) + }) +}) diff --git a/pkg/offering/base/orderer/override/pvc.go b/pkg/offering/base/orderer/override/pvc.go new file mode 100644 index 00000000..51c83e38 --- /dev/null +++ b/pkg/offering/base/orderer/override/pvc.go @@ -0,0 +1,80 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override + +import ( + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (o *Override) PVC(object v1.Object, pvc *corev1.PersistentVolumeClaim, action resources.Action) error { + instance := object.(*current.IBPOrderer) + switch action { + case resources.Create: + return o.CreatePVC(instance, pvc) + case resources.Update: + return o.UpdatePVC(instance, pvc) + } + + return nil +} + +func (o *Override) CreatePVC(instance *current.IBPOrderer, pvc *corev1.PersistentVolumeClaim) error { + storage := instance.Spec.Storage + if storage != nil { + ordererStorage := storage.Orderer + if ordererStorage != nil { + if ordererStorage.Class != "" { + pvc.Spec.StorageClassName = &ordererStorage.Class + } + if ordererStorage.Size != "" { + quantity, err := resource.ParseQuantity(ordererStorage.Size) + if err != nil { + return err + } + resourceMap := pvc.Spec.Resources.Requests + if resourceMap == nil { + resourceMap = corev1.ResourceList{} + } + resourceMap[corev1.ResourceStorage] = quantity + pvc.Spec.Resources.Requests = resourceMap + } + } + } + + if pvc.ObjectMeta.Labels == nil { + pvc.ObjectMeta.Labels = map[string]string{} + } + if instance.Spec.Zone != "" { + pvc.ObjectMeta.Labels["zone"] = instance.Spec.Zone + } + + if instance.Spec.Region != "" { + pvc.ObjectMeta.Labels["region"] = instance.Spec.Region + } + + return nil +} + +func (o *Override) UpdatePVC(instance *current.IBPOrderer, pvc *corev1.PersistentVolumeClaim) error { + return nil +} diff --git a/pkg/offering/base/orderer/override/pvc_test.go b/pkg/offering/base/orderer/override/pvc_test.go new file mode 100644 index 
00000000..c9ed0c42 --- /dev/null +++ b/pkg/offering/base/orderer/override/pvc_test.go @@ -0,0 +1,99 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" +) + +var _ = Describe("Base Orderer PVC Overrides", func() { + var ( + overrider *override.Override + instance *current.IBPOrderer + pvc *corev1.PersistentVolumeClaim + ) + + BeforeEach(func() { + var err error + + pvc, err = util.GetPVCFromFile("../../../../../definitions/orderer/pvc.yaml") + Expect(err).NotTo(HaveOccurred()) + + overrider = &override.Override{} + instance = ¤t.IBPOrderer{ + Spec: current.IBPOrdererSpec{ + Zone: "zone1", + Region: "region1", + Storage: ¤t.OrdererStorages{ + Orderer: ¤t.StorageSpec{ + Size: "100m", + Class: "manual", + }, + }, + }, + } + }) + + Context("create", func() { + It("overrides values based on spec", func() { + err := overrider.PVC(instance, pvc, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + By("setting storage class", func() { + Expect(pvc.Spec.StorageClassName).To(Equal(&instance.Spec.Storage.Orderer.Class)) + }) + + By("setting requested storage size", func() { + expectedRequests, err := resource.ParseQuantity(instance.Spec.Storage.Orderer.Size) + Expect(err).NotTo(HaveOccurred()) + Expect(pvc.Spec.Resources.Requests).To(Equal(corev1.ResourceList{corev1.ResourceStorage: expectedRequests})) + }) + + By("setting zone labels", func() { + Expect(pvc.ObjectMeta.Labels["zone"]).To(Equal(instance.Spec.Zone)) + }) + + By("setting region labels", func() { + Expect(pvc.ObjectMeta.Labels["region"]).To(Equal(instance.Spec.Region)) + }) + }) + + It("sets class to manual if spec used local", func() { + instance.Spec.Storage.Orderer.Class = "manual" + err := overrider.PVC(instance, pvc, resources.Create) + Expect(err).NotTo(HaveOccurred()) + Expect(*pvc.Spec.StorageClassName).To(Equal("manual")) + }) + + It("returns an error if invalid value for size is used", func() { + instance.Spec.Storage.Orderer.Size = "10x" + err := overrider.PVC(instance, pvc, resources.Create) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("quantities must match the regular expression")) + }) + }) +}) diff --git a/pkg/offering/base/orderer/override/service.go b/pkg/offering/base/orderer/override/service.go new file mode 100644 index 00000000..13ed8308 --- /dev/null +++ b/pkg/offering/base/orderer/override/service.go @@ -0,0 +1,53 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: 
Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override + +import ( + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (o *Override) Service(object v1.Object, service *corev1.Service, action resources.Action) error { + instance := object.(*current.IBPOrderer) + switch action { + case resources.Create: + return o.CreateService(instance, service) + case resources.Update: + return o.UpdateService(instance, service) + } + + return nil +} + +func (o *Override) CreateService(instance *current.IBPOrderer, service *corev1.Service) error { + if instance.Spec.Service != nil { + serviceType := instance.Spec.Service.Type + if serviceType != "" { + service.Spec.Type = serviceType + } + } + + return nil +} + +func (o *Override) UpdateService(instance *current.IBPOrderer, service *corev1.Service) error { + return nil +} diff --git a/pkg/offering/base/orderer/override/service_test.go b/pkg/offering/base/orderer/override/service_test.go new file mode 100644 index 00000000..af7f72ec --- /dev/null +++ b/pkg/offering/base/orderer/override/service_test.go @@ -0,0 +1,65 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" +) + +var _ = Describe("Base Orderer Service Overrides", func() { + var ( + overrider *override.Override + instance *current.IBPOrderer + service *corev1.Service + ) + + BeforeEach(func() { + var err error + + service, err = util.GetServiceFromFile("../../../../../definitions/orderer/service.yaml") + Expect(err).NotTo(HaveOccurred()) + + overrider = &override.Override{} + instance = ¤t.IBPOrderer{ + Spec: current.IBPOrdererSpec{ + Service: ¤t.Service{ + Type: corev1.ServiceTypeNodePort, + }, + }, + } + }) + + Context("create", func() { + It("overrides values based on spec", func() { + err := overrider.Service(instance, service, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + By("setting service type", func() { + Expect(service.Spec.Type).To(Equal(instance.Spec.Service.Type)) + }) + }) + }) +}) diff --git a/pkg/offering/base/orderer/override/serviceaccount.go b/pkg/offering/base/orderer/override/serviceaccount.go new file mode 100644 index 00000000..0e91d7a3 --- /dev/null +++ b/pkg/offering/base/orderer/override/serviceaccount.go @@ -0,0 +1,58 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package override + +import ( + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (o *Override) ServiceAccount(object v1.Object, sa *corev1.ServiceAccount, action resources.Action) error { + instance := object.(*current.IBPOrderer) + switch action { + case resources.Create: + return o.CreateServiceAccount(instance, sa) + case resources.Update: + return o.UpdateServiceAccount(instance, sa) + } + + return nil +} + +func (o *Override) CreateServiceAccount(instance *current.IBPOrderer, sa *corev1.ServiceAccount) error { + return o.commonServiceAccount(instance, sa) +} + +func (o *Override) UpdateServiceAccount(instance *current.IBPOrderer, sa *corev1.ServiceAccount) error { + return o.commonServiceAccount(instance, sa) +} + +func (o *Override) commonServiceAccount(instance *current.IBPOrderer, sa *corev1.ServiceAccount) error { + for _, pullSecret := range instance.Spec.ImagePullSecrets { + imagePullSecret := corev1.LocalObjectReference{ + Name: pullSecret, + } + + sa.ImagePullSecrets = append(sa.ImagePullSecrets, imagePullSecret) + } + + return nil +} diff --git a/pkg/offering/base/orderer/override/serviceaccount_test.go b/pkg/offering/base/orderer/override/serviceaccount_test.go new file mode 100644 index 00000000..f7bfdf38 --- /dev/null +++ b/pkg/offering/base/orderer/override/serviceaccount_test.go @@ -0,0 +1,80 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" +) + +var _ = Describe("Base Orderer Service Account Overrides", func() { + var ( + overrider *override.Override + instance *current.IBPOrderer + sa *corev1.ServiceAccount + ) + + BeforeEach(func() { + var err error + + sa, err = util.GetServiceAccountFromFile("../../../../../definitions/orderer/serviceaccount.yaml") + Expect(err).NotTo(HaveOccurred()) + + overrider = &override.Override{} + instance = ¤t.IBPOrderer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "override1", + Namespace: "namespace1", + }, + Spec: current.IBPOrdererSpec{ + ImagePullSecrets: []string{"pullsecret1"}, + }, + } + }) + + Context("create", func() { + It("overrides values in service account, based on Orderer's instance spec", func() { + err := overrider.ServiceAccount(instance, sa, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + By("setting the image pull secret", func() { + Expect(sa.ImagePullSecrets[1].Name).To(Equal(instance.Spec.ImagePullSecrets[0])) + }) + }) + + Context("update", func() { + It("overrides values in service account, based on Orderer's instance spec", func() { + err := overrider.ServiceAccount(instance, sa, resources.Update) + Expect(err).NotTo(HaveOccurred()) + + By("setting the image pull secret", func() { + Expect(sa.ImagePullSecrets[1].Name).To(Equal(instance.Spec.ImagePullSecrets[0])) + }) + }) + }) + }) +}) diff --git a/pkg/offering/base/peer/mocks/certificate_manager.go b/pkg/offering/base/peer/mocks/certificate_manager.go new file mode 100644 index 00000000..be80fa05 --- /dev/null +++ b/pkg/offering/base/peer/mocks/certificate_manager.go @@ -0,0 +1,379 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package mocks + +import ( + "sync" + "time" + + "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + commona "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/certificate" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common" + basepeer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/peer" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type CertificateManager struct { + CheckCertificatesForExpireStub func(v1.Object, int64) (v1beta1.IBPCRStatusType, string, error) + checkCertificatesForExpireMutex sync.RWMutex + checkCertificatesForExpireArgsForCall []struct { + arg1 v1.Object + arg2 int64 + } + checkCertificatesForExpireReturns struct { + result1 v1beta1.IBPCRStatusType + result2 string + result3 error + } + checkCertificatesForExpireReturnsOnCall map[int]struct { + result1 v1beta1.IBPCRStatusType + result2 string + result3 error + } + GetDurationToNextRenewalStub func(common.SecretType, v1.Object, int64) (time.Duration, error) + getDurationToNextRenewalMutex sync.RWMutex + getDurationToNextRenewalArgsForCall []struct { + arg1 common.SecretType + arg2 v1.Object + arg3 int64 + } + getDurationToNextRenewalReturns struct { + result1 time.Duration + result2 error + } + getDurationToNextRenewalReturnsOnCall map[int]struct { + result1 time.Duration + result2 error + } + GetSignCertStub func(string, string) ([]byte, error) + getSignCertMutex sync.RWMutex + getSignCertArgsForCall []struct { + arg1 string + arg2 string + } + getSignCertReturns struct { + result1 []byte + result2 error + } + getSignCertReturnsOnCall map[int]struct { + result1 []byte + result2 error + } + RenewCertStub func(common.SecretType, certificate.Instance, *v1beta1.EnrollmentSpec, *commona.BCCSP, string, bool, bool) error + renewCertMutex sync.RWMutex + renewCertArgsForCall []struct { + arg1 common.SecretType + arg2 certificate.Instance + arg3 *v1beta1.EnrollmentSpec + arg4 *commona.BCCSP + arg5 string + arg6 bool + arg7 bool + } + renewCertReturns struct { + result1 error + } + renewCertReturnsOnCall map[int]struct { + result1 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *CertificateManager) CheckCertificatesForExpire(arg1 v1.Object, arg2 int64) (v1beta1.IBPCRStatusType, string, error) { + fake.checkCertificatesForExpireMutex.Lock() + ret, specificReturn := fake.checkCertificatesForExpireReturnsOnCall[len(fake.checkCertificatesForExpireArgsForCall)] + fake.checkCertificatesForExpireArgsForCall = append(fake.checkCertificatesForExpireArgsForCall, struct { + arg1 v1.Object + arg2 int64 + }{arg1, arg2}) + stub := fake.CheckCertificatesForExpireStub + fakeReturns := fake.checkCertificatesForExpireReturns + fake.recordInvocation("CheckCertificatesForExpire", []interface{}{arg1, arg2}) + fake.checkCertificatesForExpireMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1, ret.result2, ret.result3 + } + return fakeReturns.result1, fakeReturns.result2, fakeReturns.result3 +} + +func (fake *CertificateManager) CheckCertificatesForExpireCallCount() int { + fake.checkCertificatesForExpireMutex.RLock() + defer fake.checkCertificatesForExpireMutex.RUnlock() + return len(fake.checkCertificatesForExpireArgsForCall) +} + +func (fake *CertificateManager) CheckCertificatesForExpireCalls(stub func(v1.Object, int64) (v1beta1.IBPCRStatusType, string, error)) { + fake.checkCertificatesForExpireMutex.Lock() + defer 
fake.checkCertificatesForExpireMutex.Unlock() + fake.CheckCertificatesForExpireStub = stub +} + +func (fake *CertificateManager) CheckCertificatesForExpireArgsForCall(i int) (v1.Object, int64) { + fake.checkCertificatesForExpireMutex.RLock() + defer fake.checkCertificatesForExpireMutex.RUnlock() + argsForCall := fake.checkCertificatesForExpireArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *CertificateManager) CheckCertificatesForExpireReturns(result1 v1beta1.IBPCRStatusType, result2 string, result3 error) { + fake.checkCertificatesForExpireMutex.Lock() + defer fake.checkCertificatesForExpireMutex.Unlock() + fake.CheckCertificatesForExpireStub = nil + fake.checkCertificatesForExpireReturns = struct { + result1 v1beta1.IBPCRStatusType + result2 string + result3 error + }{result1, result2, result3} +} + +func (fake *CertificateManager) CheckCertificatesForExpireReturnsOnCall(i int, result1 v1beta1.IBPCRStatusType, result2 string, result3 error) { + fake.checkCertificatesForExpireMutex.Lock() + defer fake.checkCertificatesForExpireMutex.Unlock() + fake.CheckCertificatesForExpireStub = nil + if fake.checkCertificatesForExpireReturnsOnCall == nil { + fake.checkCertificatesForExpireReturnsOnCall = make(map[int]struct { + result1 v1beta1.IBPCRStatusType + result2 string + result3 error + }) + } + fake.checkCertificatesForExpireReturnsOnCall[i] = struct { + result1 v1beta1.IBPCRStatusType + result2 string + result3 error + }{result1, result2, result3} +} + +func (fake *CertificateManager) GetDurationToNextRenewal(arg1 common.SecretType, arg2 v1.Object, arg3 int64) (time.Duration, error) { + fake.getDurationToNextRenewalMutex.Lock() + ret, specificReturn := fake.getDurationToNextRenewalReturnsOnCall[len(fake.getDurationToNextRenewalArgsForCall)] + fake.getDurationToNextRenewalArgsForCall = append(fake.getDurationToNextRenewalArgsForCall, struct { + arg1 common.SecretType + arg2 v1.Object + arg3 int64 + }{arg1, arg2, arg3}) + stub := fake.GetDurationToNextRenewalStub + fakeReturns := fake.getDurationToNextRenewalReturns + fake.recordInvocation("GetDurationToNextRenewal", []interface{}{arg1, arg2, arg3}) + fake.getDurationToNextRenewalMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *CertificateManager) GetDurationToNextRenewalCallCount() int { + fake.getDurationToNextRenewalMutex.RLock() + defer fake.getDurationToNextRenewalMutex.RUnlock() + return len(fake.getDurationToNextRenewalArgsForCall) +} + +func (fake *CertificateManager) GetDurationToNextRenewalCalls(stub func(common.SecretType, v1.Object, int64) (time.Duration, error)) { + fake.getDurationToNextRenewalMutex.Lock() + defer fake.getDurationToNextRenewalMutex.Unlock() + fake.GetDurationToNextRenewalStub = stub +} + +func (fake *CertificateManager) GetDurationToNextRenewalArgsForCall(i int) (common.SecretType, v1.Object, int64) { + fake.getDurationToNextRenewalMutex.RLock() + defer fake.getDurationToNextRenewalMutex.RUnlock() + argsForCall := fake.getDurationToNextRenewalArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *CertificateManager) GetDurationToNextRenewalReturns(result1 time.Duration, result2 error) { + fake.getDurationToNextRenewalMutex.Lock() + defer fake.getDurationToNextRenewalMutex.Unlock() + fake.GetDurationToNextRenewalStub = nil + fake.getDurationToNextRenewalReturns = struct { + result1 time.Duration + 
result2 error + }{result1, result2} +} + +func (fake *CertificateManager) GetDurationToNextRenewalReturnsOnCall(i int, result1 time.Duration, result2 error) { + fake.getDurationToNextRenewalMutex.Lock() + defer fake.getDurationToNextRenewalMutex.Unlock() + fake.GetDurationToNextRenewalStub = nil + if fake.getDurationToNextRenewalReturnsOnCall == nil { + fake.getDurationToNextRenewalReturnsOnCall = make(map[int]struct { + result1 time.Duration + result2 error + }) + } + fake.getDurationToNextRenewalReturnsOnCall[i] = struct { + result1 time.Duration + result2 error + }{result1, result2} +} + +func (fake *CertificateManager) GetSignCert(arg1 string, arg2 string) ([]byte, error) { + fake.getSignCertMutex.Lock() + ret, specificReturn := fake.getSignCertReturnsOnCall[len(fake.getSignCertArgsForCall)] + fake.getSignCertArgsForCall = append(fake.getSignCertArgsForCall, struct { + arg1 string + arg2 string + }{arg1, arg2}) + stub := fake.GetSignCertStub + fakeReturns := fake.getSignCertReturns + fake.recordInvocation("GetSignCert", []interface{}{arg1, arg2}) + fake.getSignCertMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *CertificateManager) GetSignCertCallCount() int { + fake.getSignCertMutex.RLock() + defer fake.getSignCertMutex.RUnlock() + return len(fake.getSignCertArgsForCall) +} + +func (fake *CertificateManager) GetSignCertCalls(stub func(string, string) ([]byte, error)) { + fake.getSignCertMutex.Lock() + defer fake.getSignCertMutex.Unlock() + fake.GetSignCertStub = stub +} + +func (fake *CertificateManager) GetSignCertArgsForCall(i int) (string, string) { + fake.getSignCertMutex.RLock() + defer fake.getSignCertMutex.RUnlock() + argsForCall := fake.getSignCertArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *CertificateManager) GetSignCertReturns(result1 []byte, result2 error) { + fake.getSignCertMutex.Lock() + defer fake.getSignCertMutex.Unlock() + fake.GetSignCertStub = nil + fake.getSignCertReturns = struct { + result1 []byte + result2 error + }{result1, result2} +} + +func (fake *CertificateManager) GetSignCertReturnsOnCall(i int, result1 []byte, result2 error) { + fake.getSignCertMutex.Lock() + defer fake.getSignCertMutex.Unlock() + fake.GetSignCertStub = nil + if fake.getSignCertReturnsOnCall == nil { + fake.getSignCertReturnsOnCall = make(map[int]struct { + result1 []byte + result2 error + }) + } + fake.getSignCertReturnsOnCall[i] = struct { + result1 []byte + result2 error + }{result1, result2} +} + +func (fake *CertificateManager) RenewCert(arg1 common.SecretType, arg2 certificate.Instance, arg3 *v1beta1.EnrollmentSpec, arg4 *commona.BCCSP, arg5 string, arg6 bool, arg7 bool) error { + fake.renewCertMutex.Lock() + ret, specificReturn := fake.renewCertReturnsOnCall[len(fake.renewCertArgsForCall)] + fake.renewCertArgsForCall = append(fake.renewCertArgsForCall, struct { + arg1 common.SecretType + arg2 certificate.Instance + arg3 *v1beta1.EnrollmentSpec + arg4 *commona.BCCSP + arg5 string + arg6 bool + arg7 bool + }{arg1, arg2, arg3, arg4, arg5, arg6, arg7}) + stub := fake.RenewCertStub + fakeReturns := fake.renewCertReturns + fake.recordInvocation("RenewCert", []interface{}{arg1, arg2, arg3, arg4, arg5, arg6, arg7}) + fake.renewCertMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3, arg4, arg5, arg6, arg7) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake 
*CertificateManager) RenewCertCallCount() int { + fake.renewCertMutex.RLock() + defer fake.renewCertMutex.RUnlock() + return len(fake.renewCertArgsForCall) +} + +func (fake *CertificateManager) RenewCertCalls(stub func(common.SecretType, certificate.Instance, *v1beta1.EnrollmentSpec, *commona.BCCSP, string, bool, bool) error) { + fake.renewCertMutex.Lock() + defer fake.renewCertMutex.Unlock() + fake.RenewCertStub = stub +} + +func (fake *CertificateManager) RenewCertArgsForCall(i int) (common.SecretType, certificate.Instance, *v1beta1.EnrollmentSpec, *commona.BCCSP, string, bool, bool) { + fake.renewCertMutex.RLock() + defer fake.renewCertMutex.RUnlock() + argsForCall := fake.renewCertArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5, argsForCall.arg6, argsForCall.arg7 +} + +func (fake *CertificateManager) RenewCertReturns(result1 error) { + fake.renewCertMutex.Lock() + defer fake.renewCertMutex.Unlock() + fake.RenewCertStub = nil + fake.renewCertReturns = struct { + result1 error + }{result1} +} + +func (fake *CertificateManager) RenewCertReturnsOnCall(i int, result1 error) { + fake.renewCertMutex.Lock() + defer fake.renewCertMutex.Unlock() + fake.RenewCertStub = nil + if fake.renewCertReturnsOnCall == nil { + fake.renewCertReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.renewCertReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *CertificateManager) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.checkCertificatesForExpireMutex.RLock() + defer fake.checkCertificatesForExpireMutex.RUnlock() + fake.getDurationToNextRenewalMutex.RLock() + defer fake.getDurationToNextRenewalMutex.RUnlock() + fake.getSignCertMutex.RLock() + defer fake.getSignCertMutex.RUnlock() + fake.renewCertMutex.RLock() + defer fake.renewCertMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *CertificateManager) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ basepeer.CertificateManager = new(CertificateManager) diff --git a/pkg/offering/base/peer/mocks/deployment_manager.go b/pkg/offering/base/peer/mocks/deployment_manager.go new file mode 100644 index 00000000..67269fa8 --- /dev/null +++ b/pkg/offering/base/peer/mocks/deployment_manager.go @@ -0,0 +1,827 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package mocks + +import ( + "sync" + + basepeer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/peer" + v1a "k8s.io/api/apps/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type DeploymentManager struct { + CheckForSecretChangeStub func(v1.Object, string, func(string, *v1a.Deployment) bool) error + checkForSecretChangeMutex sync.RWMutex + checkForSecretChangeArgsForCall []struct { + arg1 v1.Object + arg2 string + arg3 func(string, *v1a.Deployment) bool + } + checkForSecretChangeReturns struct { + result1 error + } + checkForSecretChangeReturnsOnCall map[int]struct { + result1 error + } + CheckStateStub func(v1.Object) error + checkStateMutex sync.RWMutex + checkStateArgsForCall []struct { + arg1 v1.Object + } + checkStateReturns struct { + result1 error + } + checkStateReturnsOnCall map[int]struct { + result1 error + } + DeleteStub func(v1.Object) error + deleteMutex sync.RWMutex + deleteArgsForCall []struct { + arg1 v1.Object + } + deleteReturns struct { + result1 error + } + deleteReturnsOnCall map[int]struct { + result1 error + } + DeploymentStatusStub func(v1.Object) (v1a.DeploymentStatus, error) + deploymentStatusMutex sync.RWMutex + deploymentStatusArgsForCall []struct { + arg1 v1.Object + } + deploymentStatusReturns struct { + result1 v1a.DeploymentStatus + result2 error + } + deploymentStatusReturnsOnCall map[int]struct { + result1 v1a.DeploymentStatus + result2 error + } + ExistsStub func(v1.Object) bool + existsMutex sync.RWMutex + existsArgsForCall []struct { + arg1 v1.Object + } + existsReturns struct { + result1 bool + } + existsReturnsOnCall map[int]struct { + result1 bool + } + GetStub func(v1.Object) (client.Object, error) + getMutex sync.RWMutex + getArgsForCall []struct { + arg1 v1.Object + } + getReturns struct { + result1 client.Object + result2 error + } + getReturnsOnCall map[int]struct { + result1 client.Object + result2 error + } + GetNameStub func(v1.Object) string + getNameMutex sync.RWMutex + getNameArgsForCall []struct { + arg1 v1.Object + } + getNameReturns struct { + result1 string + } + getNameReturnsOnCall map[int]struct { + result1 string + } + GetSchemeStub func() *runtime.Scheme + getSchemeMutex sync.RWMutex + getSchemeArgsForCall []struct { + } + getSchemeReturns struct { + result1 *runtime.Scheme + } + getSchemeReturnsOnCall map[int]struct { + result1 *runtime.Scheme + } + ReconcileStub func(v1.Object, bool) error + reconcileMutex sync.RWMutex + reconcileArgsForCall []struct { + arg1 v1.Object + arg2 bool + } + reconcileReturns struct { + result1 error + } + reconcileReturnsOnCall map[int]struct { + result1 error + } + RestoreStateStub func(v1.Object) error + restoreStateMutex sync.RWMutex + restoreStateArgsForCall []struct { + arg1 v1.Object + } + restoreStateReturns struct { + result1 error + } + restoreStateReturnsOnCall map[int]struct { + result1 error + } + SetCustomNameStub func(string) + setCustomNameMutex sync.RWMutex + setCustomNameArgsForCall []struct { + arg1 string + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *DeploymentManager) CheckForSecretChange(arg1 v1.Object, arg2 string, arg3 func(string, *v1a.Deployment) bool) error { + fake.checkForSecretChangeMutex.Lock() + ret, specificReturn := fake.checkForSecretChangeReturnsOnCall[len(fake.checkForSecretChangeArgsForCall)] + fake.checkForSecretChangeArgsForCall = append(fake.checkForSecretChangeArgsForCall, struct { + arg1 v1.Object + arg2 string 
+ arg3 func(string, *v1a.Deployment) bool + }{arg1, arg2, arg3}) + stub := fake.CheckForSecretChangeStub + fakeReturns := fake.checkForSecretChangeReturns + fake.recordInvocation("CheckForSecretChange", []interface{}{arg1, arg2, arg3}) + fake.checkForSecretChangeMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *DeploymentManager) CheckForSecretChangeCallCount() int { + fake.checkForSecretChangeMutex.RLock() + defer fake.checkForSecretChangeMutex.RUnlock() + return len(fake.checkForSecretChangeArgsForCall) +} + +func (fake *DeploymentManager) CheckForSecretChangeCalls(stub func(v1.Object, string, func(string, *v1a.Deployment) bool) error) { + fake.checkForSecretChangeMutex.Lock() + defer fake.checkForSecretChangeMutex.Unlock() + fake.CheckForSecretChangeStub = stub +} + +func (fake *DeploymentManager) CheckForSecretChangeArgsForCall(i int) (v1.Object, string, func(string, *v1a.Deployment) bool) { + fake.checkForSecretChangeMutex.RLock() + defer fake.checkForSecretChangeMutex.RUnlock() + argsForCall := fake.checkForSecretChangeArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *DeploymentManager) CheckForSecretChangeReturns(result1 error) { + fake.checkForSecretChangeMutex.Lock() + defer fake.checkForSecretChangeMutex.Unlock() + fake.CheckForSecretChangeStub = nil + fake.checkForSecretChangeReturns = struct { + result1 error + }{result1} +} + +func (fake *DeploymentManager) CheckForSecretChangeReturnsOnCall(i int, result1 error) { + fake.checkForSecretChangeMutex.Lock() + defer fake.checkForSecretChangeMutex.Unlock() + fake.CheckForSecretChangeStub = nil + if fake.checkForSecretChangeReturnsOnCall == nil { + fake.checkForSecretChangeReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.checkForSecretChangeReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *DeploymentManager) CheckState(arg1 v1.Object) error { + fake.checkStateMutex.Lock() + ret, specificReturn := fake.checkStateReturnsOnCall[len(fake.checkStateArgsForCall)] + fake.checkStateArgsForCall = append(fake.checkStateArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.CheckStateStub + fakeReturns := fake.checkStateReturns + fake.recordInvocation("CheckState", []interface{}{arg1}) + fake.checkStateMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *DeploymentManager) CheckStateCallCount() int { + fake.checkStateMutex.RLock() + defer fake.checkStateMutex.RUnlock() + return len(fake.checkStateArgsForCall) +} + +func (fake *DeploymentManager) CheckStateCalls(stub func(v1.Object) error) { + fake.checkStateMutex.Lock() + defer fake.checkStateMutex.Unlock() + fake.CheckStateStub = stub +} + +func (fake *DeploymentManager) CheckStateArgsForCall(i int) v1.Object { + fake.checkStateMutex.RLock() + defer fake.checkStateMutex.RUnlock() + argsForCall := fake.checkStateArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *DeploymentManager) CheckStateReturns(result1 error) { + fake.checkStateMutex.Lock() + defer fake.checkStateMutex.Unlock() + fake.CheckStateStub = nil + fake.checkStateReturns = struct { + result1 error + }{result1} +} + +func (fake *DeploymentManager) CheckStateReturnsOnCall(i int, result1 error) { + fake.checkStateMutex.Lock() + defer fake.checkStateMutex.Unlock() + fake.CheckStateStub = nil + if 
fake.checkStateReturnsOnCall == nil { + fake.checkStateReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.checkStateReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *DeploymentManager) Delete(arg1 v1.Object) error { + fake.deleteMutex.Lock() + ret, specificReturn := fake.deleteReturnsOnCall[len(fake.deleteArgsForCall)] + fake.deleteArgsForCall = append(fake.deleteArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.DeleteStub + fakeReturns := fake.deleteReturns + fake.recordInvocation("Delete", []interface{}{arg1}) + fake.deleteMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *DeploymentManager) DeleteCallCount() int { + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + return len(fake.deleteArgsForCall) +} + +func (fake *DeploymentManager) DeleteCalls(stub func(v1.Object) error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = stub +} + +func (fake *DeploymentManager) DeleteArgsForCall(i int) v1.Object { + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + argsForCall := fake.deleteArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *DeploymentManager) DeleteReturns(result1 error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = nil + fake.deleteReturns = struct { + result1 error + }{result1} +} + +func (fake *DeploymentManager) DeleteReturnsOnCall(i int, result1 error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = nil + if fake.deleteReturnsOnCall == nil { + fake.deleteReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.deleteReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *DeploymentManager) DeploymentStatus(arg1 v1.Object) (v1a.DeploymentStatus, error) { + fake.deploymentStatusMutex.Lock() + ret, specificReturn := fake.deploymentStatusReturnsOnCall[len(fake.deploymentStatusArgsForCall)] + fake.deploymentStatusArgsForCall = append(fake.deploymentStatusArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.DeploymentStatusStub + fakeReturns := fake.deploymentStatusReturns + fake.recordInvocation("DeploymentStatus", []interface{}{arg1}) + fake.deploymentStatusMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *DeploymentManager) DeploymentStatusCallCount() int { + fake.deploymentStatusMutex.RLock() + defer fake.deploymentStatusMutex.RUnlock() + return len(fake.deploymentStatusArgsForCall) +} + +func (fake *DeploymentManager) DeploymentStatusCalls(stub func(v1.Object) (v1a.DeploymentStatus, error)) { + fake.deploymentStatusMutex.Lock() + defer fake.deploymentStatusMutex.Unlock() + fake.DeploymentStatusStub = stub +} + +func (fake *DeploymentManager) DeploymentStatusArgsForCall(i int) v1.Object { + fake.deploymentStatusMutex.RLock() + defer fake.deploymentStatusMutex.RUnlock() + argsForCall := fake.deploymentStatusArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *DeploymentManager) DeploymentStatusReturns(result1 v1a.DeploymentStatus, result2 error) { + fake.deploymentStatusMutex.Lock() + defer fake.deploymentStatusMutex.Unlock() + fake.DeploymentStatusStub = nil + fake.deploymentStatusReturns = struct { + result1 v1a.DeploymentStatus + result2 error + }{result1, result2} +} + +func (fake *DeploymentManager) 
DeploymentStatusReturnsOnCall(i int, result1 v1a.DeploymentStatus, result2 error) { + fake.deploymentStatusMutex.Lock() + defer fake.deploymentStatusMutex.Unlock() + fake.DeploymentStatusStub = nil + if fake.deploymentStatusReturnsOnCall == nil { + fake.deploymentStatusReturnsOnCall = make(map[int]struct { + result1 v1a.DeploymentStatus + result2 error + }) + } + fake.deploymentStatusReturnsOnCall[i] = struct { + result1 v1a.DeploymentStatus + result2 error + }{result1, result2} +} + +func (fake *DeploymentManager) Exists(arg1 v1.Object) bool { + fake.existsMutex.Lock() + ret, specificReturn := fake.existsReturnsOnCall[len(fake.existsArgsForCall)] + fake.existsArgsForCall = append(fake.existsArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.ExistsStub + fakeReturns := fake.existsReturns + fake.recordInvocation("Exists", []interface{}{arg1}) + fake.existsMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *DeploymentManager) ExistsCallCount() int { + fake.existsMutex.RLock() + defer fake.existsMutex.RUnlock() + return len(fake.existsArgsForCall) +} + +func (fake *DeploymentManager) ExistsCalls(stub func(v1.Object) bool) { + fake.existsMutex.Lock() + defer fake.existsMutex.Unlock() + fake.ExistsStub = stub +} + +func (fake *DeploymentManager) ExistsArgsForCall(i int) v1.Object { + fake.existsMutex.RLock() + defer fake.existsMutex.RUnlock() + argsForCall := fake.existsArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *DeploymentManager) ExistsReturns(result1 bool) { + fake.existsMutex.Lock() + defer fake.existsMutex.Unlock() + fake.ExistsStub = nil + fake.existsReturns = struct { + result1 bool + }{result1} +} + +func (fake *DeploymentManager) ExistsReturnsOnCall(i int, result1 bool) { + fake.existsMutex.Lock() + defer fake.existsMutex.Unlock() + fake.ExistsStub = nil + if fake.existsReturnsOnCall == nil { + fake.existsReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.existsReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *DeploymentManager) Get(arg1 v1.Object) (client.Object, error) { + fake.getMutex.Lock() + ret, specificReturn := fake.getReturnsOnCall[len(fake.getArgsForCall)] + fake.getArgsForCall = append(fake.getArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.GetStub + fakeReturns := fake.getReturns + fake.recordInvocation("Get", []interface{}{arg1}) + fake.getMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *DeploymentManager) GetCallCount() int { + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + return len(fake.getArgsForCall) +} + +func (fake *DeploymentManager) GetCalls(stub func(v1.Object) (client.Object, error)) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = stub +} + +func (fake *DeploymentManager) GetArgsForCall(i int) v1.Object { + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + argsForCall := fake.getArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *DeploymentManager) GetReturns(result1 client.Object, result2 error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = nil + fake.getReturns = struct { + result1 client.Object + result2 error + }{result1, result2} +} + +func (fake *DeploymentManager) GetReturnsOnCall(i int, result1 client.Object, result2 error) { + fake.getMutex.Lock() + defer 
fake.getMutex.Unlock() + fake.GetStub = nil + if fake.getReturnsOnCall == nil { + fake.getReturnsOnCall = make(map[int]struct { + result1 client.Object + result2 error + }) + } + fake.getReturnsOnCall[i] = struct { + result1 client.Object + result2 error + }{result1, result2} +} + +func (fake *DeploymentManager) GetName(arg1 v1.Object) string { + fake.getNameMutex.Lock() + ret, specificReturn := fake.getNameReturnsOnCall[len(fake.getNameArgsForCall)] + fake.getNameArgsForCall = append(fake.getNameArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.GetNameStub + fakeReturns := fake.getNameReturns + fake.recordInvocation("GetName", []interface{}{arg1}) + fake.getNameMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *DeploymentManager) GetNameCallCount() int { + fake.getNameMutex.RLock() + defer fake.getNameMutex.RUnlock() + return len(fake.getNameArgsForCall) +} + +func (fake *DeploymentManager) GetNameCalls(stub func(v1.Object) string) { + fake.getNameMutex.Lock() + defer fake.getNameMutex.Unlock() + fake.GetNameStub = stub +} + +func (fake *DeploymentManager) GetNameArgsForCall(i int) v1.Object { + fake.getNameMutex.RLock() + defer fake.getNameMutex.RUnlock() + argsForCall := fake.getNameArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *DeploymentManager) GetNameReturns(result1 string) { + fake.getNameMutex.Lock() + defer fake.getNameMutex.Unlock() + fake.GetNameStub = nil + fake.getNameReturns = struct { + result1 string + }{result1} +} + +func (fake *DeploymentManager) GetNameReturnsOnCall(i int, result1 string) { + fake.getNameMutex.Lock() + defer fake.getNameMutex.Unlock() + fake.GetNameStub = nil + if fake.getNameReturnsOnCall == nil { + fake.getNameReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getNameReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *DeploymentManager) GetScheme() *runtime.Scheme { + fake.getSchemeMutex.Lock() + ret, specificReturn := fake.getSchemeReturnsOnCall[len(fake.getSchemeArgsForCall)] + fake.getSchemeArgsForCall = append(fake.getSchemeArgsForCall, struct { + }{}) + stub := fake.GetSchemeStub + fakeReturns := fake.getSchemeReturns + fake.recordInvocation("GetScheme", []interface{}{}) + fake.getSchemeMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *DeploymentManager) GetSchemeCallCount() int { + fake.getSchemeMutex.RLock() + defer fake.getSchemeMutex.RUnlock() + return len(fake.getSchemeArgsForCall) +} + +func (fake *DeploymentManager) GetSchemeCalls(stub func() *runtime.Scheme) { + fake.getSchemeMutex.Lock() + defer fake.getSchemeMutex.Unlock() + fake.GetSchemeStub = stub +} + +func (fake *DeploymentManager) GetSchemeReturns(result1 *runtime.Scheme) { + fake.getSchemeMutex.Lock() + defer fake.getSchemeMutex.Unlock() + fake.GetSchemeStub = nil + fake.getSchemeReturns = struct { + result1 *runtime.Scheme + }{result1} +} + +func (fake *DeploymentManager) GetSchemeReturnsOnCall(i int, result1 *runtime.Scheme) { + fake.getSchemeMutex.Lock() + defer fake.getSchemeMutex.Unlock() + fake.GetSchemeStub = nil + if fake.getSchemeReturnsOnCall == nil { + fake.getSchemeReturnsOnCall = make(map[int]struct { + result1 *runtime.Scheme + }) + } + fake.getSchemeReturnsOnCall[i] = struct { + result1 *runtime.Scheme + }{result1} +} + +func (fake *DeploymentManager) Reconcile(arg1 v1.Object, arg2 bool) error { + 
fake.reconcileMutex.Lock() + ret, specificReturn := fake.reconcileReturnsOnCall[len(fake.reconcileArgsForCall)] + fake.reconcileArgsForCall = append(fake.reconcileArgsForCall, struct { + arg1 v1.Object + arg2 bool + }{arg1, arg2}) + stub := fake.ReconcileStub + fakeReturns := fake.reconcileReturns + fake.recordInvocation("Reconcile", []interface{}{arg1, arg2}) + fake.reconcileMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *DeploymentManager) ReconcileCallCount() int { + fake.reconcileMutex.RLock() + defer fake.reconcileMutex.RUnlock() + return len(fake.reconcileArgsForCall) +} + +func (fake *DeploymentManager) ReconcileCalls(stub func(v1.Object, bool) error) { + fake.reconcileMutex.Lock() + defer fake.reconcileMutex.Unlock() + fake.ReconcileStub = stub +} + +func (fake *DeploymentManager) ReconcileArgsForCall(i int) (v1.Object, bool) { + fake.reconcileMutex.RLock() + defer fake.reconcileMutex.RUnlock() + argsForCall := fake.reconcileArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *DeploymentManager) ReconcileReturns(result1 error) { + fake.reconcileMutex.Lock() + defer fake.reconcileMutex.Unlock() + fake.ReconcileStub = nil + fake.reconcileReturns = struct { + result1 error + }{result1} +} + +func (fake *DeploymentManager) ReconcileReturnsOnCall(i int, result1 error) { + fake.reconcileMutex.Lock() + defer fake.reconcileMutex.Unlock() + fake.ReconcileStub = nil + if fake.reconcileReturnsOnCall == nil { + fake.reconcileReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.reconcileReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *DeploymentManager) RestoreState(arg1 v1.Object) error { + fake.restoreStateMutex.Lock() + ret, specificReturn := fake.restoreStateReturnsOnCall[len(fake.restoreStateArgsForCall)] + fake.restoreStateArgsForCall = append(fake.restoreStateArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.RestoreStateStub + fakeReturns := fake.restoreStateReturns + fake.recordInvocation("RestoreState", []interface{}{arg1}) + fake.restoreStateMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *DeploymentManager) RestoreStateCallCount() int { + fake.restoreStateMutex.RLock() + defer fake.restoreStateMutex.RUnlock() + return len(fake.restoreStateArgsForCall) +} + +func (fake *DeploymentManager) RestoreStateCalls(stub func(v1.Object) error) { + fake.restoreStateMutex.Lock() + defer fake.restoreStateMutex.Unlock() + fake.RestoreStateStub = stub +} + +func (fake *DeploymentManager) RestoreStateArgsForCall(i int) v1.Object { + fake.restoreStateMutex.RLock() + defer fake.restoreStateMutex.RUnlock() + argsForCall := fake.restoreStateArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *DeploymentManager) RestoreStateReturns(result1 error) { + fake.restoreStateMutex.Lock() + defer fake.restoreStateMutex.Unlock() + fake.RestoreStateStub = nil + fake.restoreStateReturns = struct { + result1 error + }{result1} +} + +func (fake *DeploymentManager) RestoreStateReturnsOnCall(i int, result1 error) { + fake.restoreStateMutex.Lock() + defer fake.restoreStateMutex.Unlock() + fake.RestoreStateStub = nil + if fake.restoreStateReturnsOnCall == nil { + fake.restoreStateReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.restoreStateReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func 
(fake *DeploymentManager) SetCustomName(arg1 string) { + fake.setCustomNameMutex.Lock() + fake.setCustomNameArgsForCall = append(fake.setCustomNameArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetCustomNameStub + fake.recordInvocation("SetCustomName", []interface{}{arg1}) + fake.setCustomNameMutex.Unlock() + if stub != nil { + fake.SetCustomNameStub(arg1) + } +} + +func (fake *DeploymentManager) SetCustomNameCallCount() int { + fake.setCustomNameMutex.RLock() + defer fake.setCustomNameMutex.RUnlock() + return len(fake.setCustomNameArgsForCall) +} + +func (fake *DeploymentManager) SetCustomNameCalls(stub func(string)) { + fake.setCustomNameMutex.Lock() + defer fake.setCustomNameMutex.Unlock() + fake.SetCustomNameStub = stub +} + +func (fake *DeploymentManager) SetCustomNameArgsForCall(i int) string { + fake.setCustomNameMutex.RLock() + defer fake.setCustomNameMutex.RUnlock() + argsForCall := fake.setCustomNameArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *DeploymentManager) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.checkForSecretChangeMutex.RLock() + defer fake.checkForSecretChangeMutex.RUnlock() + fake.checkStateMutex.RLock() + defer fake.checkStateMutex.RUnlock() + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + fake.deploymentStatusMutex.RLock() + defer fake.deploymentStatusMutex.RUnlock() + fake.existsMutex.RLock() + defer fake.existsMutex.RUnlock() + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + fake.getNameMutex.RLock() + defer fake.getNameMutex.RUnlock() + fake.getSchemeMutex.RLock() + defer fake.getSchemeMutex.RUnlock() + fake.reconcileMutex.RLock() + defer fake.reconcileMutex.RUnlock() + fake.restoreStateMutex.RLock() + defer fake.restoreStateMutex.RUnlock() + fake.setCustomNameMutex.RLock() + defer fake.setCustomNameMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *DeploymentManager) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ basepeer.DeploymentManager = new(DeploymentManager) diff --git a/pkg/offering/base/peer/mocks/initializer.go b/pkg/offering/base/peer/mocks/initializer.go new file mode 100644 index 00000000..01fdda8c --- /dev/null +++ b/pkg/offering/base/peer/mocks/initializer.go @@ -0,0 +1,1043 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package mocks + +import ( + "sync" + + "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + initializer "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer" + basepeer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/peer" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type InitializeIBPPeer struct { + CheckIfAdminCertsUpdatedStub func(*v1beta1.IBPPeer) (bool, error) + checkIfAdminCertsUpdatedMutex sync.RWMutex + checkIfAdminCertsUpdatedArgsForCall []struct { + arg1 *v1beta1.IBPPeer + } + checkIfAdminCertsUpdatedReturns struct { + result1 bool + result2 error + } + checkIfAdminCertsUpdatedReturnsOnCall map[int]struct { + result1 bool + result2 error + } + CoreConfigMapStub func() *initializer.CoreConfigMap + coreConfigMapMutex sync.RWMutex + coreConfigMapArgsForCall []struct { + } + coreConfigMapReturns struct { + result1 *initializer.CoreConfigMap + } + coreConfigMapReturnsOnCall map[int]struct { + result1 *initializer.CoreConfigMap + } + CreateStub func(initializer.CoreConfig, initializer.IBPPeer, string) (*initializer.Response, error) + createMutex sync.RWMutex + createArgsForCall []struct { + arg1 initializer.CoreConfig + arg2 initializer.IBPPeer + arg3 string + } + createReturns struct { + result1 *initializer.Response + result2 error + } + createReturnsOnCall map[int]struct { + result1 *initializer.Response + result2 error + } + GenerateOrdererCACertsSecretStub func(*v1beta1.IBPPeer, map[string][]byte) error + generateOrdererCACertsSecretMutex sync.RWMutex + generateOrdererCACertsSecretArgsForCall []struct { + arg1 *v1beta1.IBPPeer + arg2 map[string][]byte + } + generateOrdererCACertsSecretReturns struct { + result1 error + } + generateOrdererCACertsSecretReturnsOnCall map[int]struct { + result1 error + } + GenerateSecretsStub func(common.SecretType, v1.Object, *config.Response) error + generateSecretsMutex sync.RWMutex + generateSecretsArgsForCall []struct { + arg1 common.SecretType + arg2 v1.Object + arg3 *config.Response + } + generateSecretsReturns struct { + result1 error + } + generateSecretsReturnsOnCall map[int]struct { + result1 error + } + GenerateSecretsFromResponseStub func(*v1beta1.IBPPeer, *config.CryptoResponse) error + generateSecretsFromResponseMutex sync.RWMutex + generateSecretsFromResponseArgsForCall []struct { + arg1 *v1beta1.IBPPeer + arg2 *config.CryptoResponse + } + generateSecretsFromResponseReturns struct { + result1 error + } + generateSecretsFromResponseReturnsOnCall map[int]struct { + result1 error + } + GetCryptoStub func(*v1beta1.IBPPeer) (*config.CryptoResponse, error) + getCryptoMutex sync.RWMutex + getCryptoArgsForCall []struct { + arg1 *v1beta1.IBPPeer + } + getCryptoReturns struct { + result1 *config.CryptoResponse + result2 error + } + getCryptoReturnsOnCall map[int]struct { + result1 *config.CryptoResponse + result2 error + } + GetInitPeerStub func(*v1beta1.IBPPeer, string) (*initializer.Peer, error) + getInitPeerMutex sync.RWMutex + getInitPeerArgsForCall []struct { + arg1 *v1beta1.IBPPeer + arg2 string + } + getInitPeerReturns struct { + result1 *initializer.Peer + result2 error + } + getInitPeerReturnsOnCall map[int]struct { + result1 *initializer.Peer + result2 error + } + GetUpdatedPeerStub func(*v1beta1.IBPPeer) (*initializer.Peer, error) + getUpdatedPeerMutex sync.RWMutex + getUpdatedPeerArgsForCall []struct { + arg1 *v1beta1.IBPPeer + } + getUpdatedPeerReturns 
struct { + result1 *initializer.Peer + result2 error + } + getUpdatedPeerReturnsOnCall map[int]struct { + result1 *initializer.Peer + result2 error + } + MissingCryptoStub func(*v1beta1.IBPPeer) bool + missingCryptoMutex sync.RWMutex + missingCryptoArgsForCall []struct { + arg1 *v1beta1.IBPPeer + } + missingCryptoReturns struct { + result1 bool + } + missingCryptoReturnsOnCall map[int]struct { + result1 bool + } + UpdateStub func(initializer.CoreConfig, initializer.IBPPeer) (*initializer.Response, error) + updateMutex sync.RWMutex + updateArgsForCall []struct { + arg1 initializer.CoreConfig + arg2 initializer.IBPPeer + } + updateReturns struct { + result1 *initializer.Response + result2 error + } + updateReturnsOnCall map[int]struct { + result1 *initializer.Response + result2 error + } + UpdateAdminSecretStub func(*v1beta1.IBPPeer) error + updateAdminSecretMutex sync.RWMutex + updateAdminSecretArgsForCall []struct { + arg1 *v1beta1.IBPPeer + } + updateAdminSecretReturns struct { + result1 error + } + updateAdminSecretReturnsOnCall map[int]struct { + result1 error + } + UpdateSecretsFromResponseStub func(*v1beta1.IBPPeer, *config.CryptoResponse) error + updateSecretsFromResponseMutex sync.RWMutex + updateSecretsFromResponseArgsForCall []struct { + arg1 *v1beta1.IBPPeer + arg2 *config.CryptoResponse + } + updateSecretsFromResponseReturns struct { + result1 error + } + updateSecretsFromResponseReturnsOnCall map[int]struct { + result1 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *InitializeIBPPeer) CheckIfAdminCertsUpdated(arg1 *v1beta1.IBPPeer) (bool, error) { + fake.checkIfAdminCertsUpdatedMutex.Lock() + ret, specificReturn := fake.checkIfAdminCertsUpdatedReturnsOnCall[len(fake.checkIfAdminCertsUpdatedArgsForCall)] + fake.checkIfAdminCertsUpdatedArgsForCall = append(fake.checkIfAdminCertsUpdatedArgsForCall, struct { + arg1 *v1beta1.IBPPeer + }{arg1}) + stub := fake.CheckIfAdminCertsUpdatedStub + fakeReturns := fake.checkIfAdminCertsUpdatedReturns + fake.recordInvocation("CheckIfAdminCertsUpdated", []interface{}{arg1}) + fake.checkIfAdminCertsUpdatedMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *InitializeIBPPeer) CheckIfAdminCertsUpdatedCallCount() int { + fake.checkIfAdminCertsUpdatedMutex.RLock() + defer fake.checkIfAdminCertsUpdatedMutex.RUnlock() + return len(fake.checkIfAdminCertsUpdatedArgsForCall) +} + +func (fake *InitializeIBPPeer) CheckIfAdminCertsUpdatedCalls(stub func(*v1beta1.IBPPeer) (bool, error)) { + fake.checkIfAdminCertsUpdatedMutex.Lock() + defer fake.checkIfAdminCertsUpdatedMutex.Unlock() + fake.CheckIfAdminCertsUpdatedStub = stub +} + +func (fake *InitializeIBPPeer) CheckIfAdminCertsUpdatedArgsForCall(i int) *v1beta1.IBPPeer { + fake.checkIfAdminCertsUpdatedMutex.RLock() + defer fake.checkIfAdminCertsUpdatedMutex.RUnlock() + argsForCall := fake.checkIfAdminCertsUpdatedArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *InitializeIBPPeer) CheckIfAdminCertsUpdatedReturns(result1 bool, result2 error) { + fake.checkIfAdminCertsUpdatedMutex.Lock() + defer fake.checkIfAdminCertsUpdatedMutex.Unlock() + fake.CheckIfAdminCertsUpdatedStub = nil + fake.checkIfAdminCertsUpdatedReturns = struct { + result1 bool + result2 error + }{result1, result2} +} + +func (fake *InitializeIBPPeer) CheckIfAdminCertsUpdatedReturnsOnCall(i int, result1 bool, result2 error) { + 
fake.checkIfAdminCertsUpdatedMutex.Lock() + defer fake.checkIfAdminCertsUpdatedMutex.Unlock() + fake.CheckIfAdminCertsUpdatedStub = nil + if fake.checkIfAdminCertsUpdatedReturnsOnCall == nil { + fake.checkIfAdminCertsUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + result2 error + }) + } + fake.checkIfAdminCertsUpdatedReturnsOnCall[i] = struct { + result1 bool + result2 error + }{result1, result2} +} + +func (fake *InitializeIBPPeer) CoreConfigMap() *initializer.CoreConfigMap { + fake.coreConfigMapMutex.Lock() + ret, specificReturn := fake.coreConfigMapReturnsOnCall[len(fake.coreConfigMapArgsForCall)] + fake.coreConfigMapArgsForCall = append(fake.coreConfigMapArgsForCall, struct { + }{}) + stub := fake.CoreConfigMapStub + fakeReturns := fake.coreConfigMapReturns + fake.recordInvocation("CoreConfigMap", []interface{}{}) + fake.coreConfigMapMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *InitializeIBPPeer) CoreConfigMapCallCount() int { + fake.coreConfigMapMutex.RLock() + defer fake.coreConfigMapMutex.RUnlock() + return len(fake.coreConfigMapArgsForCall) +} + +func (fake *InitializeIBPPeer) CoreConfigMapCalls(stub func() *initializer.CoreConfigMap) { + fake.coreConfigMapMutex.Lock() + defer fake.coreConfigMapMutex.Unlock() + fake.CoreConfigMapStub = stub +} + +func (fake *InitializeIBPPeer) CoreConfigMapReturns(result1 *initializer.CoreConfigMap) { + fake.coreConfigMapMutex.Lock() + defer fake.coreConfigMapMutex.Unlock() + fake.CoreConfigMapStub = nil + fake.coreConfigMapReturns = struct { + result1 *initializer.CoreConfigMap + }{result1} +} + +func (fake *InitializeIBPPeer) CoreConfigMapReturnsOnCall(i int, result1 *initializer.CoreConfigMap) { + fake.coreConfigMapMutex.Lock() + defer fake.coreConfigMapMutex.Unlock() + fake.CoreConfigMapStub = nil + if fake.coreConfigMapReturnsOnCall == nil { + fake.coreConfigMapReturnsOnCall = make(map[int]struct { + result1 *initializer.CoreConfigMap + }) + } + fake.coreConfigMapReturnsOnCall[i] = struct { + result1 *initializer.CoreConfigMap + }{result1} +} + +func (fake *InitializeIBPPeer) Create(arg1 initializer.CoreConfig, arg2 initializer.IBPPeer, arg3 string) (*initializer.Response, error) { + fake.createMutex.Lock() + ret, specificReturn := fake.createReturnsOnCall[len(fake.createArgsForCall)] + fake.createArgsForCall = append(fake.createArgsForCall, struct { + arg1 initializer.CoreConfig + arg2 initializer.IBPPeer + arg3 string + }{arg1, arg2, arg3}) + stub := fake.CreateStub + fakeReturns := fake.createReturns + fake.recordInvocation("Create", []interface{}{arg1, arg2, arg3}) + fake.createMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *InitializeIBPPeer) CreateCallCount() int { + fake.createMutex.RLock() + defer fake.createMutex.RUnlock() + return len(fake.createArgsForCall) +} + +func (fake *InitializeIBPPeer) CreateCalls(stub func(initializer.CoreConfig, initializer.IBPPeer, string) (*initializer.Response, error)) { + fake.createMutex.Lock() + defer fake.createMutex.Unlock() + fake.CreateStub = stub +} + +func (fake *InitializeIBPPeer) CreateArgsForCall(i int) (initializer.CoreConfig, initializer.IBPPeer, string) { + fake.createMutex.RLock() + defer fake.createMutex.RUnlock() + argsForCall := fake.createArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + 
+func (fake *InitializeIBPPeer) CreateReturns(result1 *initializer.Response, result2 error) { + fake.createMutex.Lock() + defer fake.createMutex.Unlock() + fake.CreateStub = nil + fake.createReturns = struct { + result1 *initializer.Response + result2 error + }{result1, result2} +} + +func (fake *InitializeIBPPeer) CreateReturnsOnCall(i int, result1 *initializer.Response, result2 error) { + fake.createMutex.Lock() + defer fake.createMutex.Unlock() + fake.CreateStub = nil + if fake.createReturnsOnCall == nil { + fake.createReturnsOnCall = make(map[int]struct { + result1 *initializer.Response + result2 error + }) + } + fake.createReturnsOnCall[i] = struct { + result1 *initializer.Response + result2 error + }{result1, result2} +} + +func (fake *InitializeIBPPeer) GenerateOrdererCACertsSecret(arg1 *v1beta1.IBPPeer, arg2 map[string][]byte) error { + fake.generateOrdererCACertsSecretMutex.Lock() + ret, specificReturn := fake.generateOrdererCACertsSecretReturnsOnCall[len(fake.generateOrdererCACertsSecretArgsForCall)] + fake.generateOrdererCACertsSecretArgsForCall = append(fake.generateOrdererCACertsSecretArgsForCall, struct { + arg1 *v1beta1.IBPPeer + arg2 map[string][]byte + }{arg1, arg2}) + stub := fake.GenerateOrdererCACertsSecretStub + fakeReturns := fake.generateOrdererCACertsSecretReturns + fake.recordInvocation("GenerateOrdererCACertsSecret", []interface{}{arg1, arg2}) + fake.generateOrdererCACertsSecretMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *InitializeIBPPeer) GenerateOrdererCACertsSecretCallCount() int { + fake.generateOrdererCACertsSecretMutex.RLock() + defer fake.generateOrdererCACertsSecretMutex.RUnlock() + return len(fake.generateOrdererCACertsSecretArgsForCall) +} + +func (fake *InitializeIBPPeer) GenerateOrdererCACertsSecretCalls(stub func(*v1beta1.IBPPeer, map[string][]byte) error) { + fake.generateOrdererCACertsSecretMutex.Lock() + defer fake.generateOrdererCACertsSecretMutex.Unlock() + fake.GenerateOrdererCACertsSecretStub = stub +} + +func (fake *InitializeIBPPeer) GenerateOrdererCACertsSecretArgsForCall(i int) (*v1beta1.IBPPeer, map[string][]byte) { + fake.generateOrdererCACertsSecretMutex.RLock() + defer fake.generateOrdererCACertsSecretMutex.RUnlock() + argsForCall := fake.generateOrdererCACertsSecretArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *InitializeIBPPeer) GenerateOrdererCACertsSecretReturns(result1 error) { + fake.generateOrdererCACertsSecretMutex.Lock() + defer fake.generateOrdererCACertsSecretMutex.Unlock() + fake.GenerateOrdererCACertsSecretStub = nil + fake.generateOrdererCACertsSecretReturns = struct { + result1 error + }{result1} +} + +func (fake *InitializeIBPPeer) GenerateOrdererCACertsSecretReturnsOnCall(i int, result1 error) { + fake.generateOrdererCACertsSecretMutex.Lock() + defer fake.generateOrdererCACertsSecretMutex.Unlock() + fake.GenerateOrdererCACertsSecretStub = nil + if fake.generateOrdererCACertsSecretReturnsOnCall == nil { + fake.generateOrdererCACertsSecretReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.generateOrdererCACertsSecretReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *InitializeIBPPeer) GenerateSecrets(arg1 common.SecretType, arg2 v1.Object, arg3 *config.Response) error { + fake.generateSecretsMutex.Lock() + ret, specificReturn := fake.generateSecretsReturnsOnCall[len(fake.generateSecretsArgsForCall)] + fake.generateSecretsArgsForCall = 
append(fake.generateSecretsArgsForCall, struct { + arg1 common.SecretType + arg2 v1.Object + arg3 *config.Response + }{arg1, arg2, arg3}) + stub := fake.GenerateSecretsStub + fakeReturns := fake.generateSecretsReturns + fake.recordInvocation("GenerateSecrets", []interface{}{arg1, arg2, arg3}) + fake.generateSecretsMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *InitializeIBPPeer) GenerateSecretsCallCount() int { + fake.generateSecretsMutex.RLock() + defer fake.generateSecretsMutex.RUnlock() + return len(fake.generateSecretsArgsForCall) +} + +func (fake *InitializeIBPPeer) GenerateSecretsCalls(stub func(common.SecretType, v1.Object, *config.Response) error) { + fake.generateSecretsMutex.Lock() + defer fake.generateSecretsMutex.Unlock() + fake.GenerateSecretsStub = stub +} + +func (fake *InitializeIBPPeer) GenerateSecretsArgsForCall(i int) (common.SecretType, v1.Object, *config.Response) { + fake.generateSecretsMutex.RLock() + defer fake.generateSecretsMutex.RUnlock() + argsForCall := fake.generateSecretsArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *InitializeIBPPeer) GenerateSecretsReturns(result1 error) { + fake.generateSecretsMutex.Lock() + defer fake.generateSecretsMutex.Unlock() + fake.GenerateSecretsStub = nil + fake.generateSecretsReturns = struct { + result1 error + }{result1} +} + +func (fake *InitializeIBPPeer) GenerateSecretsReturnsOnCall(i int, result1 error) { + fake.generateSecretsMutex.Lock() + defer fake.generateSecretsMutex.Unlock() + fake.GenerateSecretsStub = nil + if fake.generateSecretsReturnsOnCall == nil { + fake.generateSecretsReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.generateSecretsReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *InitializeIBPPeer) GenerateSecretsFromResponse(arg1 *v1beta1.IBPPeer, arg2 *config.CryptoResponse) error { + fake.generateSecretsFromResponseMutex.Lock() + ret, specificReturn := fake.generateSecretsFromResponseReturnsOnCall[len(fake.generateSecretsFromResponseArgsForCall)] + fake.generateSecretsFromResponseArgsForCall = append(fake.generateSecretsFromResponseArgsForCall, struct { + arg1 *v1beta1.IBPPeer + arg2 *config.CryptoResponse + }{arg1, arg2}) + stub := fake.GenerateSecretsFromResponseStub + fakeReturns := fake.generateSecretsFromResponseReturns + fake.recordInvocation("GenerateSecretsFromResponse", []interface{}{arg1, arg2}) + fake.generateSecretsFromResponseMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *InitializeIBPPeer) GenerateSecretsFromResponseCallCount() int { + fake.generateSecretsFromResponseMutex.RLock() + defer fake.generateSecretsFromResponseMutex.RUnlock() + return len(fake.generateSecretsFromResponseArgsForCall) +} + +func (fake *InitializeIBPPeer) GenerateSecretsFromResponseCalls(stub func(*v1beta1.IBPPeer, *config.CryptoResponse) error) { + fake.generateSecretsFromResponseMutex.Lock() + defer fake.generateSecretsFromResponseMutex.Unlock() + fake.GenerateSecretsFromResponseStub = stub +} + +func (fake *InitializeIBPPeer) GenerateSecretsFromResponseArgsForCall(i int) (*v1beta1.IBPPeer, *config.CryptoResponse) { + fake.generateSecretsFromResponseMutex.RLock() + defer fake.generateSecretsFromResponseMutex.RUnlock() + argsForCall := fake.generateSecretsFromResponseArgsForCall[i] + return 
argsForCall.arg1, argsForCall.arg2 +} + +func (fake *InitializeIBPPeer) GenerateSecretsFromResponseReturns(result1 error) { + fake.generateSecretsFromResponseMutex.Lock() + defer fake.generateSecretsFromResponseMutex.Unlock() + fake.GenerateSecretsFromResponseStub = nil + fake.generateSecretsFromResponseReturns = struct { + result1 error + }{result1} +} + +func (fake *InitializeIBPPeer) GenerateSecretsFromResponseReturnsOnCall(i int, result1 error) { + fake.generateSecretsFromResponseMutex.Lock() + defer fake.generateSecretsFromResponseMutex.Unlock() + fake.GenerateSecretsFromResponseStub = nil + if fake.generateSecretsFromResponseReturnsOnCall == nil { + fake.generateSecretsFromResponseReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.generateSecretsFromResponseReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *InitializeIBPPeer) GetCrypto(arg1 *v1beta1.IBPPeer) (*config.CryptoResponse, error) { + fake.getCryptoMutex.Lock() + ret, specificReturn := fake.getCryptoReturnsOnCall[len(fake.getCryptoArgsForCall)] + fake.getCryptoArgsForCall = append(fake.getCryptoArgsForCall, struct { + arg1 *v1beta1.IBPPeer + }{arg1}) + stub := fake.GetCryptoStub + fakeReturns := fake.getCryptoReturns + fake.recordInvocation("GetCrypto", []interface{}{arg1}) + fake.getCryptoMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *InitializeIBPPeer) GetCryptoCallCount() int { + fake.getCryptoMutex.RLock() + defer fake.getCryptoMutex.RUnlock() + return len(fake.getCryptoArgsForCall) +} + +func (fake *InitializeIBPPeer) GetCryptoCalls(stub func(*v1beta1.IBPPeer) (*config.CryptoResponse, error)) { + fake.getCryptoMutex.Lock() + defer fake.getCryptoMutex.Unlock() + fake.GetCryptoStub = stub +} + +func (fake *InitializeIBPPeer) GetCryptoArgsForCall(i int) *v1beta1.IBPPeer { + fake.getCryptoMutex.RLock() + defer fake.getCryptoMutex.RUnlock() + argsForCall := fake.getCryptoArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *InitializeIBPPeer) GetCryptoReturns(result1 *config.CryptoResponse, result2 error) { + fake.getCryptoMutex.Lock() + defer fake.getCryptoMutex.Unlock() + fake.GetCryptoStub = nil + fake.getCryptoReturns = struct { + result1 *config.CryptoResponse + result2 error + }{result1, result2} +} + +func (fake *InitializeIBPPeer) GetCryptoReturnsOnCall(i int, result1 *config.CryptoResponse, result2 error) { + fake.getCryptoMutex.Lock() + defer fake.getCryptoMutex.Unlock() + fake.GetCryptoStub = nil + if fake.getCryptoReturnsOnCall == nil { + fake.getCryptoReturnsOnCall = make(map[int]struct { + result1 *config.CryptoResponse + result2 error + }) + } + fake.getCryptoReturnsOnCall[i] = struct { + result1 *config.CryptoResponse + result2 error + }{result1, result2} +} + +func (fake *InitializeIBPPeer) GetInitPeer(arg1 *v1beta1.IBPPeer, arg2 string) (*initializer.Peer, error) { + fake.getInitPeerMutex.Lock() + ret, specificReturn := fake.getInitPeerReturnsOnCall[len(fake.getInitPeerArgsForCall)] + fake.getInitPeerArgsForCall = append(fake.getInitPeerArgsForCall, struct { + arg1 *v1beta1.IBPPeer + arg2 string + }{arg1, arg2}) + stub := fake.GetInitPeerStub + fakeReturns := fake.getInitPeerReturns + fake.recordInvocation("GetInitPeer", []interface{}{arg1, arg2}) + fake.getInitPeerMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, 
fakeReturns.result2 +} + +func (fake *InitializeIBPPeer) GetInitPeerCallCount() int { + fake.getInitPeerMutex.RLock() + defer fake.getInitPeerMutex.RUnlock() + return len(fake.getInitPeerArgsForCall) +} + +func (fake *InitializeIBPPeer) GetInitPeerCalls(stub func(*v1beta1.IBPPeer, string) (*initializer.Peer, error)) { + fake.getInitPeerMutex.Lock() + defer fake.getInitPeerMutex.Unlock() + fake.GetInitPeerStub = stub +} + +func (fake *InitializeIBPPeer) GetInitPeerArgsForCall(i int) (*v1beta1.IBPPeer, string) { + fake.getInitPeerMutex.RLock() + defer fake.getInitPeerMutex.RUnlock() + argsForCall := fake.getInitPeerArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *InitializeIBPPeer) GetInitPeerReturns(result1 *initializer.Peer, result2 error) { + fake.getInitPeerMutex.Lock() + defer fake.getInitPeerMutex.Unlock() + fake.GetInitPeerStub = nil + fake.getInitPeerReturns = struct { + result1 *initializer.Peer + result2 error + }{result1, result2} +} + +func (fake *InitializeIBPPeer) GetInitPeerReturnsOnCall(i int, result1 *initializer.Peer, result2 error) { + fake.getInitPeerMutex.Lock() + defer fake.getInitPeerMutex.Unlock() + fake.GetInitPeerStub = nil + if fake.getInitPeerReturnsOnCall == nil { + fake.getInitPeerReturnsOnCall = make(map[int]struct { + result1 *initializer.Peer + result2 error + }) + } + fake.getInitPeerReturnsOnCall[i] = struct { + result1 *initializer.Peer + result2 error + }{result1, result2} +} + +func (fake *InitializeIBPPeer) GetUpdatedPeer(arg1 *v1beta1.IBPPeer) (*initializer.Peer, error) { + fake.getUpdatedPeerMutex.Lock() + ret, specificReturn := fake.getUpdatedPeerReturnsOnCall[len(fake.getUpdatedPeerArgsForCall)] + fake.getUpdatedPeerArgsForCall = append(fake.getUpdatedPeerArgsForCall, struct { + arg1 *v1beta1.IBPPeer + }{arg1}) + stub := fake.GetUpdatedPeerStub + fakeReturns := fake.getUpdatedPeerReturns + fake.recordInvocation("GetUpdatedPeer", []interface{}{arg1}) + fake.getUpdatedPeerMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *InitializeIBPPeer) GetUpdatedPeerCallCount() int { + fake.getUpdatedPeerMutex.RLock() + defer fake.getUpdatedPeerMutex.RUnlock() + return len(fake.getUpdatedPeerArgsForCall) +} + +func (fake *InitializeIBPPeer) GetUpdatedPeerCalls(stub func(*v1beta1.IBPPeer) (*initializer.Peer, error)) { + fake.getUpdatedPeerMutex.Lock() + defer fake.getUpdatedPeerMutex.Unlock() + fake.GetUpdatedPeerStub = stub +} + +func (fake *InitializeIBPPeer) GetUpdatedPeerArgsForCall(i int) *v1beta1.IBPPeer { + fake.getUpdatedPeerMutex.RLock() + defer fake.getUpdatedPeerMutex.RUnlock() + argsForCall := fake.getUpdatedPeerArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *InitializeIBPPeer) GetUpdatedPeerReturns(result1 *initializer.Peer, result2 error) { + fake.getUpdatedPeerMutex.Lock() + defer fake.getUpdatedPeerMutex.Unlock() + fake.GetUpdatedPeerStub = nil + fake.getUpdatedPeerReturns = struct { + result1 *initializer.Peer + result2 error + }{result1, result2} +} + +func (fake *InitializeIBPPeer) GetUpdatedPeerReturnsOnCall(i int, result1 *initializer.Peer, result2 error) { + fake.getUpdatedPeerMutex.Lock() + defer fake.getUpdatedPeerMutex.Unlock() + fake.GetUpdatedPeerStub = nil + if fake.getUpdatedPeerReturnsOnCall == nil { + fake.getUpdatedPeerReturnsOnCall = make(map[int]struct { + result1 *initializer.Peer + result2 error + }) + } + fake.getUpdatedPeerReturnsOnCall[i] = struct { + 
result1 *initializer.Peer + result2 error + }{result1, result2} +} + +func (fake *InitializeIBPPeer) MissingCrypto(arg1 *v1beta1.IBPPeer) bool { + fake.missingCryptoMutex.Lock() + ret, specificReturn := fake.missingCryptoReturnsOnCall[len(fake.missingCryptoArgsForCall)] + fake.missingCryptoArgsForCall = append(fake.missingCryptoArgsForCall, struct { + arg1 *v1beta1.IBPPeer + }{arg1}) + stub := fake.MissingCryptoStub + fakeReturns := fake.missingCryptoReturns + fake.recordInvocation("MissingCrypto", []interface{}{arg1}) + fake.missingCryptoMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *InitializeIBPPeer) MissingCryptoCallCount() int { + fake.missingCryptoMutex.RLock() + defer fake.missingCryptoMutex.RUnlock() + return len(fake.missingCryptoArgsForCall) +} + +func (fake *InitializeIBPPeer) MissingCryptoCalls(stub func(*v1beta1.IBPPeer) bool) { + fake.missingCryptoMutex.Lock() + defer fake.missingCryptoMutex.Unlock() + fake.MissingCryptoStub = stub +} + +func (fake *InitializeIBPPeer) MissingCryptoArgsForCall(i int) *v1beta1.IBPPeer { + fake.missingCryptoMutex.RLock() + defer fake.missingCryptoMutex.RUnlock() + argsForCall := fake.missingCryptoArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *InitializeIBPPeer) MissingCryptoReturns(result1 bool) { + fake.missingCryptoMutex.Lock() + defer fake.missingCryptoMutex.Unlock() + fake.MissingCryptoStub = nil + fake.missingCryptoReturns = struct { + result1 bool + }{result1} +} + +func (fake *InitializeIBPPeer) MissingCryptoReturnsOnCall(i int, result1 bool) { + fake.missingCryptoMutex.Lock() + defer fake.missingCryptoMutex.Unlock() + fake.MissingCryptoStub = nil + if fake.missingCryptoReturnsOnCall == nil { + fake.missingCryptoReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.missingCryptoReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *InitializeIBPPeer) Update(arg1 initializer.CoreConfig, arg2 initializer.IBPPeer) (*initializer.Response, error) { + fake.updateMutex.Lock() + ret, specificReturn := fake.updateReturnsOnCall[len(fake.updateArgsForCall)] + fake.updateArgsForCall = append(fake.updateArgsForCall, struct { + arg1 initializer.CoreConfig + arg2 initializer.IBPPeer + }{arg1, arg2}) + stub := fake.UpdateStub + fakeReturns := fake.updateReturns + fake.recordInvocation("Update", []interface{}{arg1, arg2}) + fake.updateMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *InitializeIBPPeer) UpdateCallCount() int { + fake.updateMutex.RLock() + defer fake.updateMutex.RUnlock() + return len(fake.updateArgsForCall) +} + +func (fake *InitializeIBPPeer) UpdateCalls(stub func(initializer.CoreConfig, initializer.IBPPeer) (*initializer.Response, error)) { + fake.updateMutex.Lock() + defer fake.updateMutex.Unlock() + fake.UpdateStub = stub +} + +func (fake *InitializeIBPPeer) UpdateArgsForCall(i int) (initializer.CoreConfig, initializer.IBPPeer) { + fake.updateMutex.RLock() + defer fake.updateMutex.RUnlock() + argsForCall := fake.updateArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *InitializeIBPPeer) UpdateReturns(result1 *initializer.Response, result2 error) { + fake.updateMutex.Lock() + defer fake.updateMutex.Unlock() + fake.UpdateStub = nil + fake.updateReturns = struct { + result1 *initializer.Response + result2 error + }{result1, 
result2} +} + +func (fake *InitializeIBPPeer) UpdateReturnsOnCall(i int, result1 *initializer.Response, result2 error) { + fake.updateMutex.Lock() + defer fake.updateMutex.Unlock() + fake.UpdateStub = nil + if fake.updateReturnsOnCall == nil { + fake.updateReturnsOnCall = make(map[int]struct { + result1 *initializer.Response + result2 error + }) + } + fake.updateReturnsOnCall[i] = struct { + result1 *initializer.Response + result2 error + }{result1, result2} +} + +func (fake *InitializeIBPPeer) UpdateAdminSecret(arg1 *v1beta1.IBPPeer) error { + fake.updateAdminSecretMutex.Lock() + ret, specificReturn := fake.updateAdminSecretReturnsOnCall[len(fake.updateAdminSecretArgsForCall)] + fake.updateAdminSecretArgsForCall = append(fake.updateAdminSecretArgsForCall, struct { + arg1 *v1beta1.IBPPeer + }{arg1}) + stub := fake.UpdateAdminSecretStub + fakeReturns := fake.updateAdminSecretReturns + fake.recordInvocation("UpdateAdminSecret", []interface{}{arg1}) + fake.updateAdminSecretMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *InitializeIBPPeer) UpdateAdminSecretCallCount() int { + fake.updateAdminSecretMutex.RLock() + defer fake.updateAdminSecretMutex.RUnlock() + return len(fake.updateAdminSecretArgsForCall) +} + +func (fake *InitializeIBPPeer) UpdateAdminSecretCalls(stub func(*v1beta1.IBPPeer) error) { + fake.updateAdminSecretMutex.Lock() + defer fake.updateAdminSecretMutex.Unlock() + fake.UpdateAdminSecretStub = stub +} + +func (fake *InitializeIBPPeer) UpdateAdminSecretArgsForCall(i int) *v1beta1.IBPPeer { + fake.updateAdminSecretMutex.RLock() + defer fake.updateAdminSecretMutex.RUnlock() + argsForCall := fake.updateAdminSecretArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *InitializeIBPPeer) UpdateAdminSecretReturns(result1 error) { + fake.updateAdminSecretMutex.Lock() + defer fake.updateAdminSecretMutex.Unlock() + fake.UpdateAdminSecretStub = nil + fake.updateAdminSecretReturns = struct { + result1 error + }{result1} +} + +func (fake *InitializeIBPPeer) UpdateAdminSecretReturnsOnCall(i int, result1 error) { + fake.updateAdminSecretMutex.Lock() + defer fake.updateAdminSecretMutex.Unlock() + fake.UpdateAdminSecretStub = nil + if fake.updateAdminSecretReturnsOnCall == nil { + fake.updateAdminSecretReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.updateAdminSecretReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *InitializeIBPPeer) UpdateSecretsFromResponse(arg1 *v1beta1.IBPPeer, arg2 *config.CryptoResponse) error { + fake.updateSecretsFromResponseMutex.Lock() + ret, specificReturn := fake.updateSecretsFromResponseReturnsOnCall[len(fake.updateSecretsFromResponseArgsForCall)] + fake.updateSecretsFromResponseArgsForCall = append(fake.updateSecretsFromResponseArgsForCall, struct { + arg1 *v1beta1.IBPPeer + arg2 *config.CryptoResponse + }{arg1, arg2}) + stub := fake.UpdateSecretsFromResponseStub + fakeReturns := fake.updateSecretsFromResponseReturns + fake.recordInvocation("UpdateSecretsFromResponse", []interface{}{arg1, arg2}) + fake.updateSecretsFromResponseMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *InitializeIBPPeer) UpdateSecretsFromResponseCallCount() int { + fake.updateSecretsFromResponseMutex.RLock() + defer fake.updateSecretsFromResponseMutex.RUnlock() + return len(fake.updateSecretsFromResponseArgsForCall) +} + +func (fake 
*InitializeIBPPeer) UpdateSecretsFromResponseCalls(stub func(*v1beta1.IBPPeer, *config.CryptoResponse) error) { + fake.updateSecretsFromResponseMutex.Lock() + defer fake.updateSecretsFromResponseMutex.Unlock() + fake.UpdateSecretsFromResponseStub = stub +} + +func (fake *InitializeIBPPeer) UpdateSecretsFromResponseArgsForCall(i int) (*v1beta1.IBPPeer, *config.CryptoResponse) { + fake.updateSecretsFromResponseMutex.RLock() + defer fake.updateSecretsFromResponseMutex.RUnlock() + argsForCall := fake.updateSecretsFromResponseArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *InitializeIBPPeer) UpdateSecretsFromResponseReturns(result1 error) { + fake.updateSecretsFromResponseMutex.Lock() + defer fake.updateSecretsFromResponseMutex.Unlock() + fake.UpdateSecretsFromResponseStub = nil + fake.updateSecretsFromResponseReturns = struct { + result1 error + }{result1} +} + +func (fake *InitializeIBPPeer) UpdateSecretsFromResponseReturnsOnCall(i int, result1 error) { + fake.updateSecretsFromResponseMutex.Lock() + defer fake.updateSecretsFromResponseMutex.Unlock() + fake.UpdateSecretsFromResponseStub = nil + if fake.updateSecretsFromResponseReturnsOnCall == nil { + fake.updateSecretsFromResponseReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.updateSecretsFromResponseReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *InitializeIBPPeer) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.checkIfAdminCertsUpdatedMutex.RLock() + defer fake.checkIfAdminCertsUpdatedMutex.RUnlock() + fake.coreConfigMapMutex.RLock() + defer fake.coreConfigMapMutex.RUnlock() + fake.createMutex.RLock() + defer fake.createMutex.RUnlock() + fake.generateOrdererCACertsSecretMutex.RLock() + defer fake.generateOrdererCACertsSecretMutex.RUnlock() + fake.generateSecretsMutex.RLock() + defer fake.generateSecretsMutex.RUnlock() + fake.generateSecretsFromResponseMutex.RLock() + defer fake.generateSecretsFromResponseMutex.RUnlock() + fake.getCryptoMutex.RLock() + defer fake.getCryptoMutex.RUnlock() + fake.getInitPeerMutex.RLock() + defer fake.getInitPeerMutex.RUnlock() + fake.getUpdatedPeerMutex.RLock() + defer fake.getUpdatedPeerMutex.RUnlock() + fake.missingCryptoMutex.RLock() + defer fake.missingCryptoMutex.RUnlock() + fake.updateMutex.RLock() + defer fake.updateMutex.RUnlock() + fake.updateAdminSecretMutex.RLock() + defer fake.updateAdminSecretMutex.RUnlock() + fake.updateSecretsFromResponseMutex.RLock() + defer fake.updateSecretsFromResponseMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *InitializeIBPPeer) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ basepeer.InitializeIBPPeer = new(InitializeIBPPeer) diff --git a/pkg/offering/base/peer/mocks/restart_manager.go b/pkg/offering/base/peer/mocks/restart_manager.go new file mode 100644 index 00000000..08e20f32 --- /dev/null +++ b/pkg/offering/base/peer/mocks/restart_manager.go @@ -0,0 +1,486 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package mocks + +import ( + "sync" + + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common" + basepeer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/peer" + "github.com/IBM-Blockchain/fabric-operator/pkg/restart" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type RestartManager struct { + ForAdminCertUpdateStub func(v1.Object) error + forAdminCertUpdateMutex sync.RWMutex + forAdminCertUpdateArgsForCall []struct { + arg1 v1.Object + } + forAdminCertUpdateReturns struct { + result1 error + } + forAdminCertUpdateReturnsOnCall map[int]struct { + result1 error + } + ForCertUpdateStub func(common.SecretType, v1.Object) error + forCertUpdateMutex sync.RWMutex + forCertUpdateArgsForCall []struct { + arg1 common.SecretType + arg2 v1.Object + } + forCertUpdateReturns struct { + result1 error + } + forCertUpdateReturnsOnCall map[int]struct { + result1 error + } + ForConfigOverrideStub func(v1.Object) error + forConfigOverrideMutex sync.RWMutex + forConfigOverrideArgsForCall []struct { + arg1 v1.Object + } + forConfigOverrideReturns struct { + result1 error + } + forConfigOverrideReturnsOnCall map[int]struct { + result1 error + } + ForNodeOUStub func(v1.Object) error + forNodeOUMutex sync.RWMutex + forNodeOUArgsForCall []struct { + arg1 v1.Object + } + forNodeOUReturns struct { + result1 error + } + forNodeOUReturnsOnCall map[int]struct { + result1 error + } + ForRestartActionStub func(v1.Object) error + forRestartActionMutex sync.RWMutex + forRestartActionArgsForCall []struct { + arg1 v1.Object + } + forRestartActionReturns struct { + result1 error + } + forRestartActionReturnsOnCall map[int]struct { + result1 error + } + TriggerIfNeededStub func(restart.Instance) error + triggerIfNeededMutex sync.RWMutex + triggerIfNeededArgsForCall []struct { + arg1 restart.Instance + } + triggerIfNeededReturns struct { + result1 error + } + triggerIfNeededReturnsOnCall map[int]struct { + result1 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *RestartManager) ForAdminCertUpdate(arg1 v1.Object) error { + fake.forAdminCertUpdateMutex.Lock() + ret, specificReturn := fake.forAdminCertUpdateReturnsOnCall[len(fake.forAdminCertUpdateArgsForCall)] + fake.forAdminCertUpdateArgsForCall = append(fake.forAdminCertUpdateArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.ForAdminCertUpdateStub + fakeReturns := fake.forAdminCertUpdateReturns + fake.recordInvocation("ForAdminCertUpdate", []interface{}{arg1}) + fake.forAdminCertUpdateMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *RestartManager) ForAdminCertUpdateCallCount() int { + fake.forAdminCertUpdateMutex.RLock() + defer fake.forAdminCertUpdateMutex.RUnlock() + return len(fake.forAdminCertUpdateArgsForCall) +} + +func (fake *RestartManager) ForAdminCertUpdateCalls(stub func(v1.Object) error) { + fake.forAdminCertUpdateMutex.Lock() + defer fake.forAdminCertUpdateMutex.Unlock() + fake.ForAdminCertUpdateStub = stub +} + +func (fake *RestartManager) ForAdminCertUpdateArgsForCall(i int) v1.Object { + fake.forAdminCertUpdateMutex.RLock() + defer fake.forAdminCertUpdateMutex.RUnlock() + argsForCall := fake.forAdminCertUpdateArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *RestartManager) ForAdminCertUpdateReturns(result1 error) { + fake.forAdminCertUpdateMutex.Lock() + defer fake.forAdminCertUpdateMutex.Unlock() + fake.ForAdminCertUpdateStub = nil + 
fake.forAdminCertUpdateReturns = struct { + result1 error + }{result1} +} + +func (fake *RestartManager) ForAdminCertUpdateReturnsOnCall(i int, result1 error) { + fake.forAdminCertUpdateMutex.Lock() + defer fake.forAdminCertUpdateMutex.Unlock() + fake.ForAdminCertUpdateStub = nil + if fake.forAdminCertUpdateReturnsOnCall == nil { + fake.forAdminCertUpdateReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.forAdminCertUpdateReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *RestartManager) ForCertUpdate(arg1 common.SecretType, arg2 v1.Object) error { + fake.forCertUpdateMutex.Lock() + ret, specificReturn := fake.forCertUpdateReturnsOnCall[len(fake.forCertUpdateArgsForCall)] + fake.forCertUpdateArgsForCall = append(fake.forCertUpdateArgsForCall, struct { + arg1 common.SecretType + arg2 v1.Object + }{arg1, arg2}) + stub := fake.ForCertUpdateStub + fakeReturns := fake.forCertUpdateReturns + fake.recordInvocation("ForCertUpdate", []interface{}{arg1, arg2}) + fake.forCertUpdateMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *RestartManager) ForCertUpdateCallCount() int { + fake.forCertUpdateMutex.RLock() + defer fake.forCertUpdateMutex.RUnlock() + return len(fake.forCertUpdateArgsForCall) +} + +func (fake *RestartManager) ForCertUpdateCalls(stub func(common.SecretType, v1.Object) error) { + fake.forCertUpdateMutex.Lock() + defer fake.forCertUpdateMutex.Unlock() + fake.ForCertUpdateStub = stub +} + +func (fake *RestartManager) ForCertUpdateArgsForCall(i int) (common.SecretType, v1.Object) { + fake.forCertUpdateMutex.RLock() + defer fake.forCertUpdateMutex.RUnlock() + argsForCall := fake.forCertUpdateArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *RestartManager) ForCertUpdateReturns(result1 error) { + fake.forCertUpdateMutex.Lock() + defer fake.forCertUpdateMutex.Unlock() + fake.ForCertUpdateStub = nil + fake.forCertUpdateReturns = struct { + result1 error + }{result1} +} + +func (fake *RestartManager) ForCertUpdateReturnsOnCall(i int, result1 error) { + fake.forCertUpdateMutex.Lock() + defer fake.forCertUpdateMutex.Unlock() + fake.ForCertUpdateStub = nil + if fake.forCertUpdateReturnsOnCall == nil { + fake.forCertUpdateReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.forCertUpdateReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *RestartManager) ForConfigOverride(arg1 v1.Object) error { + fake.forConfigOverrideMutex.Lock() + ret, specificReturn := fake.forConfigOverrideReturnsOnCall[len(fake.forConfigOverrideArgsForCall)] + fake.forConfigOverrideArgsForCall = append(fake.forConfigOverrideArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.ForConfigOverrideStub + fakeReturns := fake.forConfigOverrideReturns + fake.recordInvocation("ForConfigOverride", []interface{}{arg1}) + fake.forConfigOverrideMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *RestartManager) ForConfigOverrideCallCount() int { + fake.forConfigOverrideMutex.RLock() + defer fake.forConfigOverrideMutex.RUnlock() + return len(fake.forConfigOverrideArgsForCall) +} + +func (fake *RestartManager) ForConfigOverrideCalls(stub func(v1.Object) error) { + fake.forConfigOverrideMutex.Lock() + defer fake.forConfigOverrideMutex.Unlock() + fake.ForConfigOverrideStub = stub +} + +func (fake *RestartManager) 
ForConfigOverrideArgsForCall(i int) v1.Object { + fake.forConfigOverrideMutex.RLock() + defer fake.forConfigOverrideMutex.RUnlock() + argsForCall := fake.forConfigOverrideArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *RestartManager) ForConfigOverrideReturns(result1 error) { + fake.forConfigOverrideMutex.Lock() + defer fake.forConfigOverrideMutex.Unlock() + fake.ForConfigOverrideStub = nil + fake.forConfigOverrideReturns = struct { + result1 error + }{result1} +} + +func (fake *RestartManager) ForConfigOverrideReturnsOnCall(i int, result1 error) { + fake.forConfigOverrideMutex.Lock() + defer fake.forConfigOverrideMutex.Unlock() + fake.ForConfigOverrideStub = nil + if fake.forConfigOverrideReturnsOnCall == nil { + fake.forConfigOverrideReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.forConfigOverrideReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *RestartManager) ForNodeOU(arg1 v1.Object) error { + fake.forNodeOUMutex.Lock() + ret, specificReturn := fake.forNodeOUReturnsOnCall[len(fake.forNodeOUArgsForCall)] + fake.forNodeOUArgsForCall = append(fake.forNodeOUArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.ForNodeOUStub + fakeReturns := fake.forNodeOUReturns + fake.recordInvocation("ForNodeOU", []interface{}{arg1}) + fake.forNodeOUMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *RestartManager) ForNodeOUCallCount() int { + fake.forNodeOUMutex.RLock() + defer fake.forNodeOUMutex.RUnlock() + return len(fake.forNodeOUArgsForCall) +} + +func (fake *RestartManager) ForNodeOUCalls(stub func(v1.Object) error) { + fake.forNodeOUMutex.Lock() + defer fake.forNodeOUMutex.Unlock() + fake.ForNodeOUStub = stub +} + +func (fake *RestartManager) ForNodeOUArgsForCall(i int) v1.Object { + fake.forNodeOUMutex.RLock() + defer fake.forNodeOUMutex.RUnlock() + argsForCall := fake.forNodeOUArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *RestartManager) ForNodeOUReturns(result1 error) { + fake.forNodeOUMutex.Lock() + defer fake.forNodeOUMutex.Unlock() + fake.ForNodeOUStub = nil + fake.forNodeOUReturns = struct { + result1 error + }{result1} +} + +func (fake *RestartManager) ForNodeOUReturnsOnCall(i int, result1 error) { + fake.forNodeOUMutex.Lock() + defer fake.forNodeOUMutex.Unlock() + fake.ForNodeOUStub = nil + if fake.forNodeOUReturnsOnCall == nil { + fake.forNodeOUReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.forNodeOUReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *RestartManager) ForRestartAction(arg1 v1.Object) error { + fake.forRestartActionMutex.Lock() + ret, specificReturn := fake.forRestartActionReturnsOnCall[len(fake.forRestartActionArgsForCall)] + fake.forRestartActionArgsForCall = append(fake.forRestartActionArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.ForRestartActionStub + fakeReturns := fake.forRestartActionReturns + fake.recordInvocation("ForRestartAction", []interface{}{arg1}) + fake.forRestartActionMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *RestartManager) ForRestartActionCallCount() int { + fake.forRestartActionMutex.RLock() + defer fake.forRestartActionMutex.RUnlock() + return len(fake.forRestartActionArgsForCall) +} + +func (fake *RestartManager) ForRestartActionCalls(stub func(v1.Object) error) { + 
fake.forRestartActionMutex.Lock() + defer fake.forRestartActionMutex.Unlock() + fake.ForRestartActionStub = stub +} + +func (fake *RestartManager) ForRestartActionArgsForCall(i int) v1.Object { + fake.forRestartActionMutex.RLock() + defer fake.forRestartActionMutex.RUnlock() + argsForCall := fake.forRestartActionArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *RestartManager) ForRestartActionReturns(result1 error) { + fake.forRestartActionMutex.Lock() + defer fake.forRestartActionMutex.Unlock() + fake.ForRestartActionStub = nil + fake.forRestartActionReturns = struct { + result1 error + }{result1} +} + +func (fake *RestartManager) ForRestartActionReturnsOnCall(i int, result1 error) { + fake.forRestartActionMutex.Lock() + defer fake.forRestartActionMutex.Unlock() + fake.ForRestartActionStub = nil + if fake.forRestartActionReturnsOnCall == nil { + fake.forRestartActionReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.forRestartActionReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *RestartManager) TriggerIfNeeded(arg1 restart.Instance) error { + fake.triggerIfNeededMutex.Lock() + ret, specificReturn := fake.triggerIfNeededReturnsOnCall[len(fake.triggerIfNeededArgsForCall)] + fake.triggerIfNeededArgsForCall = append(fake.triggerIfNeededArgsForCall, struct { + arg1 restart.Instance + }{arg1}) + stub := fake.TriggerIfNeededStub + fakeReturns := fake.triggerIfNeededReturns + fake.recordInvocation("TriggerIfNeeded", []interface{}{arg1}) + fake.triggerIfNeededMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *RestartManager) TriggerIfNeededCallCount() int { + fake.triggerIfNeededMutex.RLock() + defer fake.triggerIfNeededMutex.RUnlock() + return len(fake.triggerIfNeededArgsForCall) +} + +func (fake *RestartManager) TriggerIfNeededCalls(stub func(restart.Instance) error) { + fake.triggerIfNeededMutex.Lock() + defer fake.triggerIfNeededMutex.Unlock() + fake.TriggerIfNeededStub = stub +} + +func (fake *RestartManager) TriggerIfNeededArgsForCall(i int) restart.Instance { + fake.triggerIfNeededMutex.RLock() + defer fake.triggerIfNeededMutex.RUnlock() + argsForCall := fake.triggerIfNeededArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *RestartManager) TriggerIfNeededReturns(result1 error) { + fake.triggerIfNeededMutex.Lock() + defer fake.triggerIfNeededMutex.Unlock() + fake.TriggerIfNeededStub = nil + fake.triggerIfNeededReturns = struct { + result1 error + }{result1} +} + +func (fake *RestartManager) TriggerIfNeededReturnsOnCall(i int, result1 error) { + fake.triggerIfNeededMutex.Lock() + defer fake.triggerIfNeededMutex.Unlock() + fake.TriggerIfNeededStub = nil + if fake.triggerIfNeededReturnsOnCall == nil { + fake.triggerIfNeededReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.triggerIfNeededReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *RestartManager) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.forAdminCertUpdateMutex.RLock() + defer fake.forAdminCertUpdateMutex.RUnlock() + fake.forCertUpdateMutex.RLock() + defer fake.forCertUpdateMutex.RUnlock() + fake.forConfigOverrideMutex.RLock() + defer fake.forConfigOverrideMutex.RUnlock() + fake.forNodeOUMutex.RLock() + defer fake.forNodeOUMutex.RUnlock() + fake.forRestartActionMutex.RLock() + defer fake.forRestartActionMutex.RUnlock() + fake.triggerIfNeededMutex.RLock() + 
defer fake.triggerIfNeededMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *RestartManager) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ basepeer.RestartManager = new(RestartManager) diff --git a/pkg/offering/base/peer/mocks/update.go b/pkg/offering/base/peer/mocks/update.go new file mode 100644 index 00000000..90eb970e --- /dev/null +++ b/pkg/offering/base/peer/mocks/update.go @@ -0,0 +1,1637 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package mocks + +import ( + "sync" + + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common" + basepeer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/peer" +) + +type Update struct { + CertificateCreatedStub func() bool + certificateCreatedMutex sync.RWMutex + certificateCreatedArgsForCall []struct { + } + certificateCreatedReturns struct { + result1 bool + } + certificateCreatedReturnsOnCall map[int]struct { + result1 bool + } + CertificateUpdatedStub func() bool + certificateUpdatedMutex sync.RWMutex + certificateUpdatedArgsForCall []struct { + } + certificateUpdatedReturns struct { + result1 bool + } + certificateUpdatedReturnsOnCall map[int]struct { + result1 bool + } + ConfigOverridesUpdatedStub func() bool + configOverridesUpdatedMutex sync.RWMutex + configOverridesUpdatedArgsForCall []struct { + } + configOverridesUpdatedReturns struct { + result1 bool + } + configOverridesUpdatedReturnsOnCall map[int]struct { + result1 bool + } + CryptoBackupNeededStub func() bool + cryptoBackupNeededMutex sync.RWMutex + cryptoBackupNeededArgsForCall []struct { + } + cryptoBackupNeededReturns struct { + result1 bool + } + cryptoBackupNeededReturnsOnCall map[int]struct { + result1 bool + } + DindArgsUpdatedStub func() bool + dindArgsUpdatedMutex sync.RWMutex + dindArgsUpdatedArgsForCall []struct { + } + dindArgsUpdatedReturns struct { + result1 bool + } + dindArgsUpdatedReturnsOnCall map[int]struct { + result1 bool + } + EcertEnrollStub func() bool + ecertEnrollMutex sync.RWMutex + ecertEnrollArgsForCall []struct { + } + ecertEnrollReturns struct { + result1 bool + } + ecertEnrollReturnsOnCall map[int]struct { + result1 bool + } + EcertNewKeyReenrollStub func() bool + ecertNewKeyReenrollMutex sync.RWMutex + ecertNewKeyReenrollArgsForCall []struct { + } + ecertNewKeyReenrollReturns struct { + result1 bool + } + ecertNewKeyReenrollReturnsOnCall map[int]struct { + result1 bool + } + EcertReenrollNeededStub func() bool + ecertReenrollNeededMutex sync.RWMutex + ecertReenrollNeededArgsForCall []struct { + } + ecertReenrollNeededReturns struct { + result1 bool + } + ecertReenrollNeededReturnsOnCall map[int]struct { + result1 bool + } + EcertUpdatedStub func() bool + ecertUpdatedMutex sync.RWMutex + ecertUpdatedArgsForCall []struct { + } + ecertUpdatedReturns struct { + result1 bool + } + ecertUpdatedReturnsOnCall map[int]struct { + result1 bool + } + FabricVersionUpdatedStub func() bool + fabricVersionUpdatedMutex sync.RWMutex + fabricVersionUpdatedArgsForCall []struct { + } + fabricVersionUpdatedReturns struct { + result1 bool + } + fabricVersionUpdatedReturnsOnCall map[int]struct 
{ + result1 bool + } + GetCreatedCertTypeStub func() common.SecretType + getCreatedCertTypeMutex sync.RWMutex + getCreatedCertTypeArgsForCall []struct { + } + getCreatedCertTypeReturns struct { + result1 common.SecretType + } + getCreatedCertTypeReturnsOnCall map[int]struct { + result1 common.SecretType + } + ImagesUpdatedStub func() bool + imagesUpdatedMutex sync.RWMutex + imagesUpdatedArgsForCall []struct { + } + imagesUpdatedReturns struct { + result1 bool + } + imagesUpdatedReturnsOnCall map[int]struct { + result1 bool + } + MSPUpdatedStub func() bool + mSPUpdatedMutex sync.RWMutex + mSPUpdatedArgsForCall []struct { + } + mSPUpdatedReturns struct { + result1 bool + } + mSPUpdatedReturnsOnCall map[int]struct { + result1 bool + } + MigrateToV2Stub func() bool + migrateToV2Mutex sync.RWMutex + migrateToV2ArgsForCall []struct { + } + migrateToV2Returns struct { + result1 bool + } + migrateToV2ReturnsOnCall map[int]struct { + result1 bool + } + MigrateToV24Stub func() bool + migrateToV24Mutex sync.RWMutex + migrateToV24ArgsForCall []struct { + } + migrateToV24Returns struct { + result1 bool + } + migrateToV24ReturnsOnCall map[int]struct { + result1 bool + } + NodeOUUpdatedStub func() bool + nodeOUUpdatedMutex sync.RWMutex + nodeOUUpdatedArgsForCall []struct { + } + nodeOUUpdatedReturns struct { + result1 bool + } + nodeOUUpdatedReturnsOnCall map[int]struct { + result1 bool + } + PeerTagUpdatedStub func() bool + peerTagUpdatedMutex sync.RWMutex + peerTagUpdatedArgsForCall []struct { + } + peerTagUpdatedReturns struct { + result1 bool + } + peerTagUpdatedReturnsOnCall map[int]struct { + result1 bool + } + RestartNeededStub func() bool + restartNeededMutex sync.RWMutex + restartNeededArgsForCall []struct { + } + restartNeededReturns struct { + result1 bool + } + restartNeededReturnsOnCall map[int]struct { + result1 bool + } + SetDindArgsUpdatedStub func(bool) + setDindArgsUpdatedMutex sync.RWMutex + setDindArgsUpdatedArgsForCall []struct { + arg1 bool + } + SpecUpdatedStub func() bool + specUpdatedMutex sync.RWMutex + specUpdatedArgsForCall []struct { + } + specUpdatedReturns struct { + result1 bool + } + specUpdatedReturnsOnCall map[int]struct { + result1 bool + } + TLSCertEnrollStub func() bool + tLSCertEnrollMutex sync.RWMutex + tLSCertEnrollArgsForCall []struct { + } + tLSCertEnrollReturns struct { + result1 bool + } + tLSCertEnrollReturnsOnCall map[int]struct { + result1 bool + } + TLSCertUpdatedStub func() bool + tLSCertUpdatedMutex sync.RWMutex + tLSCertUpdatedArgsForCall []struct { + } + tLSCertUpdatedReturns struct { + result1 bool + } + tLSCertUpdatedReturnsOnCall map[int]struct { + result1 bool + } + TLSReenrollNeededStub func() bool + tLSReenrollNeededMutex sync.RWMutex + tLSReenrollNeededArgsForCall []struct { + } + tLSReenrollNeededReturns struct { + result1 bool + } + tLSReenrollNeededReturnsOnCall map[int]struct { + result1 bool + } + TLScertNewKeyReenrollStub func() bool + tLScertNewKeyReenrollMutex sync.RWMutex + tLScertNewKeyReenrollArgsForCall []struct { + } + tLScertNewKeyReenrollReturns struct { + result1 bool + } + tLScertNewKeyReenrollReturnsOnCall map[int]struct { + result1 bool + } + UpgradeDBsStub func() bool + upgradeDBsMutex sync.RWMutex + upgradeDBsArgsForCall []struct { + } + upgradeDBsReturns struct { + result1 bool + } + upgradeDBsReturnsOnCall map[int]struct { + result1 bool + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *Update) CertificateCreated() bool { + fake.certificateCreatedMutex.Lock() + ret, 
specificReturn := fake.certificateCreatedReturnsOnCall[len(fake.certificateCreatedArgsForCall)] + fake.certificateCreatedArgsForCall = append(fake.certificateCreatedArgsForCall, struct { + }{}) + stub := fake.CertificateCreatedStub + fakeReturns := fake.certificateCreatedReturns + fake.recordInvocation("CertificateCreated", []interface{}{}) + fake.certificateCreatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) CertificateCreatedCallCount() int { + fake.certificateCreatedMutex.RLock() + defer fake.certificateCreatedMutex.RUnlock() + return len(fake.certificateCreatedArgsForCall) +} + +func (fake *Update) CertificateCreatedCalls(stub func() bool) { + fake.certificateCreatedMutex.Lock() + defer fake.certificateCreatedMutex.Unlock() + fake.CertificateCreatedStub = stub +} + +func (fake *Update) CertificateCreatedReturns(result1 bool) { + fake.certificateCreatedMutex.Lock() + defer fake.certificateCreatedMutex.Unlock() + fake.CertificateCreatedStub = nil + fake.certificateCreatedReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) CertificateCreatedReturnsOnCall(i int, result1 bool) { + fake.certificateCreatedMutex.Lock() + defer fake.certificateCreatedMutex.Unlock() + fake.CertificateCreatedStub = nil + if fake.certificateCreatedReturnsOnCall == nil { + fake.certificateCreatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.certificateCreatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) CertificateUpdated() bool { + fake.certificateUpdatedMutex.Lock() + ret, specificReturn := fake.certificateUpdatedReturnsOnCall[len(fake.certificateUpdatedArgsForCall)] + fake.certificateUpdatedArgsForCall = append(fake.certificateUpdatedArgsForCall, struct { + }{}) + stub := fake.CertificateUpdatedStub + fakeReturns := fake.certificateUpdatedReturns + fake.recordInvocation("CertificateUpdated", []interface{}{}) + fake.certificateUpdatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) CertificateUpdatedCallCount() int { + fake.certificateUpdatedMutex.RLock() + defer fake.certificateUpdatedMutex.RUnlock() + return len(fake.certificateUpdatedArgsForCall) +} + +func (fake *Update) CertificateUpdatedCalls(stub func() bool) { + fake.certificateUpdatedMutex.Lock() + defer fake.certificateUpdatedMutex.Unlock() + fake.CertificateUpdatedStub = stub +} + +func (fake *Update) CertificateUpdatedReturns(result1 bool) { + fake.certificateUpdatedMutex.Lock() + defer fake.certificateUpdatedMutex.Unlock() + fake.CertificateUpdatedStub = nil + fake.certificateUpdatedReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) CertificateUpdatedReturnsOnCall(i int, result1 bool) { + fake.certificateUpdatedMutex.Lock() + defer fake.certificateUpdatedMutex.Unlock() + fake.CertificateUpdatedStub = nil + if fake.certificateUpdatedReturnsOnCall == nil { + fake.certificateUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.certificateUpdatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) ConfigOverridesUpdated() bool { + fake.configOverridesUpdatedMutex.Lock() + ret, specificReturn := fake.configOverridesUpdatedReturnsOnCall[len(fake.configOverridesUpdatedArgsForCall)] + fake.configOverridesUpdatedArgsForCall = append(fake.configOverridesUpdatedArgsForCall, struct { + }{}) + stub 
:= fake.ConfigOverridesUpdatedStub + fakeReturns := fake.configOverridesUpdatedReturns + fake.recordInvocation("ConfigOverridesUpdated", []interface{}{}) + fake.configOverridesUpdatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) ConfigOverridesUpdatedCallCount() int { + fake.configOverridesUpdatedMutex.RLock() + defer fake.configOverridesUpdatedMutex.RUnlock() + return len(fake.configOverridesUpdatedArgsForCall) +} + +func (fake *Update) ConfigOverridesUpdatedCalls(stub func() bool) { + fake.configOverridesUpdatedMutex.Lock() + defer fake.configOverridesUpdatedMutex.Unlock() + fake.ConfigOverridesUpdatedStub = stub +} + +func (fake *Update) ConfigOverridesUpdatedReturns(result1 bool) { + fake.configOverridesUpdatedMutex.Lock() + defer fake.configOverridesUpdatedMutex.Unlock() + fake.ConfigOverridesUpdatedStub = nil + fake.configOverridesUpdatedReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) ConfigOverridesUpdatedReturnsOnCall(i int, result1 bool) { + fake.configOverridesUpdatedMutex.Lock() + defer fake.configOverridesUpdatedMutex.Unlock() + fake.ConfigOverridesUpdatedStub = nil + if fake.configOverridesUpdatedReturnsOnCall == nil { + fake.configOverridesUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.configOverridesUpdatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) CryptoBackupNeeded() bool { + fake.cryptoBackupNeededMutex.Lock() + ret, specificReturn := fake.cryptoBackupNeededReturnsOnCall[len(fake.cryptoBackupNeededArgsForCall)] + fake.cryptoBackupNeededArgsForCall = append(fake.cryptoBackupNeededArgsForCall, struct { + }{}) + stub := fake.CryptoBackupNeededStub + fakeReturns := fake.cryptoBackupNeededReturns + fake.recordInvocation("CryptoBackupNeeded", []interface{}{}) + fake.cryptoBackupNeededMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) CryptoBackupNeededCallCount() int { + fake.cryptoBackupNeededMutex.RLock() + defer fake.cryptoBackupNeededMutex.RUnlock() + return len(fake.cryptoBackupNeededArgsForCall) +} + +func (fake *Update) CryptoBackupNeededCalls(stub func() bool) { + fake.cryptoBackupNeededMutex.Lock() + defer fake.cryptoBackupNeededMutex.Unlock() + fake.CryptoBackupNeededStub = stub +} + +func (fake *Update) CryptoBackupNeededReturns(result1 bool) { + fake.cryptoBackupNeededMutex.Lock() + defer fake.cryptoBackupNeededMutex.Unlock() + fake.CryptoBackupNeededStub = nil + fake.cryptoBackupNeededReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) CryptoBackupNeededReturnsOnCall(i int, result1 bool) { + fake.cryptoBackupNeededMutex.Lock() + defer fake.cryptoBackupNeededMutex.Unlock() + fake.CryptoBackupNeededStub = nil + if fake.cryptoBackupNeededReturnsOnCall == nil { + fake.cryptoBackupNeededReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.cryptoBackupNeededReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) DindArgsUpdated() bool { + fake.dindArgsUpdatedMutex.Lock() + ret, specificReturn := fake.dindArgsUpdatedReturnsOnCall[len(fake.dindArgsUpdatedArgsForCall)] + fake.dindArgsUpdatedArgsForCall = append(fake.dindArgsUpdatedArgsForCall, struct { + }{}) + stub := fake.DindArgsUpdatedStub + fakeReturns := fake.dindArgsUpdatedReturns + fake.recordInvocation("DindArgsUpdated", []interface{}{}) + 
fake.dindArgsUpdatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) DindArgsUpdatedCallCount() int { + fake.dindArgsUpdatedMutex.RLock() + defer fake.dindArgsUpdatedMutex.RUnlock() + return len(fake.dindArgsUpdatedArgsForCall) +} + +func (fake *Update) DindArgsUpdatedCalls(stub func() bool) { + fake.dindArgsUpdatedMutex.Lock() + defer fake.dindArgsUpdatedMutex.Unlock() + fake.DindArgsUpdatedStub = stub +} + +func (fake *Update) DindArgsUpdatedReturns(result1 bool) { + fake.dindArgsUpdatedMutex.Lock() + defer fake.dindArgsUpdatedMutex.Unlock() + fake.DindArgsUpdatedStub = nil + fake.dindArgsUpdatedReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) DindArgsUpdatedReturnsOnCall(i int, result1 bool) { + fake.dindArgsUpdatedMutex.Lock() + defer fake.dindArgsUpdatedMutex.Unlock() + fake.DindArgsUpdatedStub = nil + if fake.dindArgsUpdatedReturnsOnCall == nil { + fake.dindArgsUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.dindArgsUpdatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) EcertEnroll() bool { + fake.ecertEnrollMutex.Lock() + ret, specificReturn := fake.ecertEnrollReturnsOnCall[len(fake.ecertEnrollArgsForCall)] + fake.ecertEnrollArgsForCall = append(fake.ecertEnrollArgsForCall, struct { + }{}) + stub := fake.EcertEnrollStub + fakeReturns := fake.ecertEnrollReturns + fake.recordInvocation("EcertEnroll", []interface{}{}) + fake.ecertEnrollMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) EcertEnrollCallCount() int { + fake.ecertEnrollMutex.RLock() + defer fake.ecertEnrollMutex.RUnlock() + return len(fake.ecertEnrollArgsForCall) +} + +func (fake *Update) EcertEnrollCalls(stub func() bool) { + fake.ecertEnrollMutex.Lock() + defer fake.ecertEnrollMutex.Unlock() + fake.EcertEnrollStub = stub +} + +func (fake *Update) EcertEnrollReturns(result1 bool) { + fake.ecertEnrollMutex.Lock() + defer fake.ecertEnrollMutex.Unlock() + fake.EcertEnrollStub = nil + fake.ecertEnrollReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) EcertEnrollReturnsOnCall(i int, result1 bool) { + fake.ecertEnrollMutex.Lock() + defer fake.ecertEnrollMutex.Unlock() + fake.EcertEnrollStub = nil + if fake.ecertEnrollReturnsOnCall == nil { + fake.ecertEnrollReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.ecertEnrollReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) EcertNewKeyReenroll() bool { + fake.ecertNewKeyReenrollMutex.Lock() + ret, specificReturn := fake.ecertNewKeyReenrollReturnsOnCall[len(fake.ecertNewKeyReenrollArgsForCall)] + fake.ecertNewKeyReenrollArgsForCall = append(fake.ecertNewKeyReenrollArgsForCall, struct { + }{}) + stub := fake.EcertNewKeyReenrollStub + fakeReturns := fake.ecertNewKeyReenrollReturns + fake.recordInvocation("EcertNewKeyReenroll", []interface{}{}) + fake.ecertNewKeyReenrollMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) EcertNewKeyReenrollCallCount() int { + fake.ecertNewKeyReenrollMutex.RLock() + defer fake.ecertNewKeyReenrollMutex.RUnlock() + return len(fake.ecertNewKeyReenrollArgsForCall) +} + +func (fake *Update) EcertNewKeyReenrollCalls(stub func() bool) { + fake.ecertNewKeyReenrollMutex.Lock() + defer 
fake.ecertNewKeyReenrollMutex.Unlock() + fake.EcertNewKeyReenrollStub = stub +} + +func (fake *Update) EcertNewKeyReenrollReturns(result1 bool) { + fake.ecertNewKeyReenrollMutex.Lock() + defer fake.ecertNewKeyReenrollMutex.Unlock() + fake.EcertNewKeyReenrollStub = nil + fake.ecertNewKeyReenrollReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) EcertNewKeyReenrollReturnsOnCall(i int, result1 bool) { + fake.ecertNewKeyReenrollMutex.Lock() + defer fake.ecertNewKeyReenrollMutex.Unlock() + fake.EcertNewKeyReenrollStub = nil + if fake.ecertNewKeyReenrollReturnsOnCall == nil { + fake.ecertNewKeyReenrollReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.ecertNewKeyReenrollReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) EcertReenrollNeeded() bool { + fake.ecertReenrollNeededMutex.Lock() + ret, specificReturn := fake.ecertReenrollNeededReturnsOnCall[len(fake.ecertReenrollNeededArgsForCall)] + fake.ecertReenrollNeededArgsForCall = append(fake.ecertReenrollNeededArgsForCall, struct { + }{}) + stub := fake.EcertReenrollNeededStub + fakeReturns := fake.ecertReenrollNeededReturns + fake.recordInvocation("EcertReenrollNeeded", []interface{}{}) + fake.ecertReenrollNeededMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) EcertReenrollNeededCallCount() int { + fake.ecertReenrollNeededMutex.RLock() + defer fake.ecertReenrollNeededMutex.RUnlock() + return len(fake.ecertReenrollNeededArgsForCall) +} + +func (fake *Update) EcertReenrollNeededCalls(stub func() bool) { + fake.ecertReenrollNeededMutex.Lock() + defer fake.ecertReenrollNeededMutex.Unlock() + fake.EcertReenrollNeededStub = stub +} + +func (fake *Update) EcertReenrollNeededReturns(result1 bool) { + fake.ecertReenrollNeededMutex.Lock() + defer fake.ecertReenrollNeededMutex.Unlock() + fake.EcertReenrollNeededStub = nil + fake.ecertReenrollNeededReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) EcertReenrollNeededReturnsOnCall(i int, result1 bool) { + fake.ecertReenrollNeededMutex.Lock() + defer fake.ecertReenrollNeededMutex.Unlock() + fake.EcertReenrollNeededStub = nil + if fake.ecertReenrollNeededReturnsOnCall == nil { + fake.ecertReenrollNeededReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.ecertReenrollNeededReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) EcertUpdated() bool { + fake.ecertUpdatedMutex.Lock() + ret, specificReturn := fake.ecertUpdatedReturnsOnCall[len(fake.ecertUpdatedArgsForCall)] + fake.ecertUpdatedArgsForCall = append(fake.ecertUpdatedArgsForCall, struct { + }{}) + stub := fake.EcertUpdatedStub + fakeReturns := fake.ecertUpdatedReturns + fake.recordInvocation("EcertUpdated", []interface{}{}) + fake.ecertUpdatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) EcertUpdatedCallCount() int { + fake.ecertUpdatedMutex.RLock() + defer fake.ecertUpdatedMutex.RUnlock() + return len(fake.ecertUpdatedArgsForCall) +} + +func (fake *Update) EcertUpdatedCalls(stub func() bool) { + fake.ecertUpdatedMutex.Lock() + defer fake.ecertUpdatedMutex.Unlock() + fake.EcertUpdatedStub = stub +} + +func (fake *Update) EcertUpdatedReturns(result1 bool) { + fake.ecertUpdatedMutex.Lock() + defer fake.ecertUpdatedMutex.Unlock() + fake.EcertUpdatedStub = nil + fake.ecertUpdatedReturns = struct { + result1 
bool + }{result1} +} + +func (fake *Update) EcertUpdatedReturnsOnCall(i int, result1 bool) { + fake.ecertUpdatedMutex.Lock() + defer fake.ecertUpdatedMutex.Unlock() + fake.EcertUpdatedStub = nil + if fake.ecertUpdatedReturnsOnCall == nil { + fake.ecertUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.ecertUpdatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) FabricVersionUpdated() bool { + fake.fabricVersionUpdatedMutex.Lock() + ret, specificReturn := fake.fabricVersionUpdatedReturnsOnCall[len(fake.fabricVersionUpdatedArgsForCall)] + fake.fabricVersionUpdatedArgsForCall = append(fake.fabricVersionUpdatedArgsForCall, struct { + }{}) + stub := fake.FabricVersionUpdatedStub + fakeReturns := fake.fabricVersionUpdatedReturns + fake.recordInvocation("FabricVersionUpdated", []interface{}{}) + fake.fabricVersionUpdatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) FabricVersionUpdatedCallCount() int { + fake.fabricVersionUpdatedMutex.RLock() + defer fake.fabricVersionUpdatedMutex.RUnlock() + return len(fake.fabricVersionUpdatedArgsForCall) +} + +func (fake *Update) FabricVersionUpdatedCalls(stub func() bool) { + fake.fabricVersionUpdatedMutex.Lock() + defer fake.fabricVersionUpdatedMutex.Unlock() + fake.FabricVersionUpdatedStub = stub +} + +func (fake *Update) FabricVersionUpdatedReturns(result1 bool) { + fake.fabricVersionUpdatedMutex.Lock() + defer fake.fabricVersionUpdatedMutex.Unlock() + fake.FabricVersionUpdatedStub = nil + fake.fabricVersionUpdatedReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) FabricVersionUpdatedReturnsOnCall(i int, result1 bool) { + fake.fabricVersionUpdatedMutex.Lock() + defer fake.fabricVersionUpdatedMutex.Unlock() + fake.FabricVersionUpdatedStub = nil + if fake.fabricVersionUpdatedReturnsOnCall == nil { + fake.fabricVersionUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.fabricVersionUpdatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) GetCreatedCertType() common.SecretType { + fake.getCreatedCertTypeMutex.Lock() + ret, specificReturn := fake.getCreatedCertTypeReturnsOnCall[len(fake.getCreatedCertTypeArgsForCall)] + fake.getCreatedCertTypeArgsForCall = append(fake.getCreatedCertTypeArgsForCall, struct { + }{}) + stub := fake.GetCreatedCertTypeStub + fakeReturns := fake.getCreatedCertTypeReturns + fake.recordInvocation("GetCreatedCertType", []interface{}{}) + fake.getCreatedCertTypeMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) GetCreatedCertTypeCallCount() int { + fake.getCreatedCertTypeMutex.RLock() + defer fake.getCreatedCertTypeMutex.RUnlock() + return len(fake.getCreatedCertTypeArgsForCall) +} + +func (fake *Update) GetCreatedCertTypeCalls(stub func() common.SecretType) { + fake.getCreatedCertTypeMutex.Lock() + defer fake.getCreatedCertTypeMutex.Unlock() + fake.GetCreatedCertTypeStub = stub +} + +func (fake *Update) GetCreatedCertTypeReturns(result1 common.SecretType) { + fake.getCreatedCertTypeMutex.Lock() + defer fake.getCreatedCertTypeMutex.Unlock() + fake.GetCreatedCertTypeStub = nil + fake.getCreatedCertTypeReturns = struct { + result1 common.SecretType + }{result1} +} + +func (fake *Update) GetCreatedCertTypeReturnsOnCall(i int, result1 common.SecretType) { + fake.getCreatedCertTypeMutex.Lock() + 
defer fake.getCreatedCertTypeMutex.Unlock() + fake.GetCreatedCertTypeStub = nil + if fake.getCreatedCertTypeReturnsOnCall == nil { + fake.getCreatedCertTypeReturnsOnCall = make(map[int]struct { + result1 common.SecretType + }) + } + fake.getCreatedCertTypeReturnsOnCall[i] = struct { + result1 common.SecretType + }{result1} +} + +func (fake *Update) ImagesUpdated() bool { + fake.imagesUpdatedMutex.Lock() + ret, specificReturn := fake.imagesUpdatedReturnsOnCall[len(fake.imagesUpdatedArgsForCall)] + fake.imagesUpdatedArgsForCall = append(fake.imagesUpdatedArgsForCall, struct { + }{}) + stub := fake.ImagesUpdatedStub + fakeReturns := fake.imagesUpdatedReturns + fake.recordInvocation("ImagesUpdated", []interface{}{}) + fake.imagesUpdatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) ImagesUpdatedCallCount() int { + fake.imagesUpdatedMutex.RLock() + defer fake.imagesUpdatedMutex.RUnlock() + return len(fake.imagesUpdatedArgsForCall) +} + +func (fake *Update) ImagesUpdatedCalls(stub func() bool) { + fake.imagesUpdatedMutex.Lock() + defer fake.imagesUpdatedMutex.Unlock() + fake.ImagesUpdatedStub = stub +} + +func (fake *Update) ImagesUpdatedReturns(result1 bool) { + fake.imagesUpdatedMutex.Lock() + defer fake.imagesUpdatedMutex.Unlock() + fake.ImagesUpdatedStub = nil + fake.imagesUpdatedReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) ImagesUpdatedReturnsOnCall(i int, result1 bool) { + fake.imagesUpdatedMutex.Lock() + defer fake.imagesUpdatedMutex.Unlock() + fake.ImagesUpdatedStub = nil + if fake.imagesUpdatedReturnsOnCall == nil { + fake.imagesUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.imagesUpdatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) MSPUpdated() bool { + fake.mSPUpdatedMutex.Lock() + ret, specificReturn := fake.mSPUpdatedReturnsOnCall[len(fake.mSPUpdatedArgsForCall)] + fake.mSPUpdatedArgsForCall = append(fake.mSPUpdatedArgsForCall, struct { + }{}) + stub := fake.MSPUpdatedStub + fakeReturns := fake.mSPUpdatedReturns + fake.recordInvocation("MSPUpdated", []interface{}{}) + fake.mSPUpdatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) MSPUpdatedCallCount() int { + fake.mSPUpdatedMutex.RLock() + defer fake.mSPUpdatedMutex.RUnlock() + return len(fake.mSPUpdatedArgsForCall) +} + +func (fake *Update) MSPUpdatedCalls(stub func() bool) { + fake.mSPUpdatedMutex.Lock() + defer fake.mSPUpdatedMutex.Unlock() + fake.MSPUpdatedStub = stub +} + +func (fake *Update) MSPUpdatedReturns(result1 bool) { + fake.mSPUpdatedMutex.Lock() + defer fake.mSPUpdatedMutex.Unlock() + fake.MSPUpdatedStub = nil + fake.mSPUpdatedReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) MSPUpdatedReturnsOnCall(i int, result1 bool) { + fake.mSPUpdatedMutex.Lock() + defer fake.mSPUpdatedMutex.Unlock() + fake.MSPUpdatedStub = nil + if fake.mSPUpdatedReturnsOnCall == nil { + fake.mSPUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.mSPUpdatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) MigrateToV2() bool { + fake.migrateToV2Mutex.Lock() + ret, specificReturn := fake.migrateToV2ReturnsOnCall[len(fake.migrateToV2ArgsForCall)] + fake.migrateToV2ArgsForCall = append(fake.migrateToV2ArgsForCall, struct { + }{}) + stub := fake.MigrateToV2Stub + 
fakeReturns := fake.migrateToV2Returns + fake.recordInvocation("MigrateToV2", []interface{}{}) + fake.migrateToV2Mutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) MigrateToV2CallCount() int { + fake.migrateToV2Mutex.RLock() + defer fake.migrateToV2Mutex.RUnlock() + return len(fake.migrateToV2ArgsForCall) +} + +func (fake *Update) MigrateToV2Calls(stub func() bool) { + fake.migrateToV2Mutex.Lock() + defer fake.migrateToV2Mutex.Unlock() + fake.MigrateToV2Stub = stub +} + +func (fake *Update) MigrateToV2Returns(result1 bool) { + fake.migrateToV2Mutex.Lock() + defer fake.migrateToV2Mutex.Unlock() + fake.MigrateToV2Stub = nil + fake.migrateToV2Returns = struct { + result1 bool + }{result1} +} + +func (fake *Update) MigrateToV2ReturnsOnCall(i int, result1 bool) { + fake.migrateToV2Mutex.Lock() + defer fake.migrateToV2Mutex.Unlock() + fake.MigrateToV2Stub = nil + if fake.migrateToV2ReturnsOnCall == nil { + fake.migrateToV2ReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.migrateToV2ReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) MigrateToV24() bool { + fake.migrateToV24Mutex.Lock() + ret, specificReturn := fake.migrateToV24ReturnsOnCall[len(fake.migrateToV24ArgsForCall)] + fake.migrateToV24ArgsForCall = append(fake.migrateToV24ArgsForCall, struct { + }{}) + stub := fake.MigrateToV24Stub + fakeReturns := fake.migrateToV24Returns + fake.recordInvocation("MigrateToV24", []interface{}{}) + fake.migrateToV24Mutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) MigrateToV24CallCount() int { + fake.migrateToV24Mutex.RLock() + defer fake.migrateToV24Mutex.RUnlock() + return len(fake.migrateToV24ArgsForCall) +} + +func (fake *Update) MigrateToV24Calls(stub func() bool) { + fake.migrateToV24Mutex.Lock() + defer fake.migrateToV24Mutex.Unlock() + fake.MigrateToV24Stub = stub +} + +func (fake *Update) MigrateToV24Returns(result1 bool) { + fake.migrateToV24Mutex.Lock() + defer fake.migrateToV24Mutex.Unlock() + fake.MigrateToV24Stub = nil + fake.migrateToV24Returns = struct { + result1 bool + }{result1} +} + +func (fake *Update) MigrateToV24ReturnsOnCall(i int, result1 bool) { + fake.migrateToV24Mutex.Lock() + defer fake.migrateToV24Mutex.Unlock() + fake.MigrateToV24Stub = nil + if fake.migrateToV24ReturnsOnCall == nil { + fake.migrateToV24ReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.migrateToV24ReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) NodeOUUpdated() bool { + fake.nodeOUUpdatedMutex.Lock() + ret, specificReturn := fake.nodeOUUpdatedReturnsOnCall[len(fake.nodeOUUpdatedArgsForCall)] + fake.nodeOUUpdatedArgsForCall = append(fake.nodeOUUpdatedArgsForCall, struct { + }{}) + stub := fake.NodeOUUpdatedStub + fakeReturns := fake.nodeOUUpdatedReturns + fake.recordInvocation("NodeOUUpdated", []interface{}{}) + fake.nodeOUUpdatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) NodeOUUpdatedCallCount() int { + fake.nodeOUUpdatedMutex.RLock() + defer fake.nodeOUUpdatedMutex.RUnlock() + return len(fake.nodeOUUpdatedArgsForCall) +} + +func (fake *Update) NodeOUUpdatedCalls(stub func() bool) { + fake.nodeOUUpdatedMutex.Lock() + defer fake.nodeOUUpdatedMutex.Unlock() + fake.NodeOUUpdatedStub = stub +} 
+ +func (fake *Update) NodeOUUpdatedReturns(result1 bool) { + fake.nodeOUUpdatedMutex.Lock() + defer fake.nodeOUUpdatedMutex.Unlock() + fake.NodeOUUpdatedStub = nil + fake.nodeOUUpdatedReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) NodeOUUpdatedReturnsOnCall(i int, result1 bool) { + fake.nodeOUUpdatedMutex.Lock() + defer fake.nodeOUUpdatedMutex.Unlock() + fake.NodeOUUpdatedStub = nil + if fake.nodeOUUpdatedReturnsOnCall == nil { + fake.nodeOUUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.nodeOUUpdatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) PeerTagUpdated() bool { + fake.peerTagUpdatedMutex.Lock() + ret, specificReturn := fake.peerTagUpdatedReturnsOnCall[len(fake.peerTagUpdatedArgsForCall)] + fake.peerTagUpdatedArgsForCall = append(fake.peerTagUpdatedArgsForCall, struct { + }{}) + stub := fake.PeerTagUpdatedStub + fakeReturns := fake.peerTagUpdatedReturns + fake.recordInvocation("PeerTagUpdated", []interface{}{}) + fake.peerTagUpdatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) PeerTagUpdatedCallCount() int { + fake.peerTagUpdatedMutex.RLock() + defer fake.peerTagUpdatedMutex.RUnlock() + return len(fake.peerTagUpdatedArgsForCall) +} + +func (fake *Update) PeerTagUpdatedCalls(stub func() bool) { + fake.peerTagUpdatedMutex.Lock() + defer fake.peerTagUpdatedMutex.Unlock() + fake.PeerTagUpdatedStub = stub +} + +func (fake *Update) PeerTagUpdatedReturns(result1 bool) { + fake.peerTagUpdatedMutex.Lock() + defer fake.peerTagUpdatedMutex.Unlock() + fake.PeerTagUpdatedStub = nil + fake.peerTagUpdatedReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) PeerTagUpdatedReturnsOnCall(i int, result1 bool) { + fake.peerTagUpdatedMutex.Lock() + defer fake.peerTagUpdatedMutex.Unlock() + fake.PeerTagUpdatedStub = nil + if fake.peerTagUpdatedReturnsOnCall == nil { + fake.peerTagUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.peerTagUpdatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) RestartNeeded() bool { + fake.restartNeededMutex.Lock() + ret, specificReturn := fake.restartNeededReturnsOnCall[len(fake.restartNeededArgsForCall)] + fake.restartNeededArgsForCall = append(fake.restartNeededArgsForCall, struct { + }{}) + stub := fake.RestartNeededStub + fakeReturns := fake.restartNeededReturns + fake.recordInvocation("RestartNeeded", []interface{}{}) + fake.restartNeededMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) RestartNeededCallCount() int { + fake.restartNeededMutex.RLock() + defer fake.restartNeededMutex.RUnlock() + return len(fake.restartNeededArgsForCall) +} + +func (fake *Update) RestartNeededCalls(stub func() bool) { + fake.restartNeededMutex.Lock() + defer fake.restartNeededMutex.Unlock() + fake.RestartNeededStub = stub +} + +func (fake *Update) RestartNeededReturns(result1 bool) { + fake.restartNeededMutex.Lock() + defer fake.restartNeededMutex.Unlock() + fake.RestartNeededStub = nil + fake.restartNeededReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) RestartNeededReturnsOnCall(i int, result1 bool) { + fake.restartNeededMutex.Lock() + defer fake.restartNeededMutex.Unlock() + fake.RestartNeededStub = nil + if fake.restartNeededReturnsOnCall == nil { + fake.restartNeededReturnsOnCall = 
make(map[int]struct { + result1 bool + }) + } + fake.restartNeededReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) SetDindArgsUpdated(arg1 bool) { + fake.setDindArgsUpdatedMutex.Lock() + fake.setDindArgsUpdatedArgsForCall = append(fake.setDindArgsUpdatedArgsForCall, struct { + arg1 bool + }{arg1}) + stub := fake.SetDindArgsUpdatedStub + fake.recordInvocation("SetDindArgsUpdated", []interface{}{arg1}) + fake.setDindArgsUpdatedMutex.Unlock() + if stub != nil { + fake.SetDindArgsUpdatedStub(arg1) + } +} + +func (fake *Update) SetDindArgsUpdatedCallCount() int { + fake.setDindArgsUpdatedMutex.RLock() + defer fake.setDindArgsUpdatedMutex.RUnlock() + return len(fake.setDindArgsUpdatedArgsForCall) +} + +func (fake *Update) SetDindArgsUpdatedCalls(stub func(bool)) { + fake.setDindArgsUpdatedMutex.Lock() + defer fake.setDindArgsUpdatedMutex.Unlock() + fake.SetDindArgsUpdatedStub = stub +} + +func (fake *Update) SetDindArgsUpdatedArgsForCall(i int) bool { + fake.setDindArgsUpdatedMutex.RLock() + defer fake.setDindArgsUpdatedMutex.RUnlock() + argsForCall := fake.setDindArgsUpdatedArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Update) SpecUpdated() bool { + fake.specUpdatedMutex.Lock() + ret, specificReturn := fake.specUpdatedReturnsOnCall[len(fake.specUpdatedArgsForCall)] + fake.specUpdatedArgsForCall = append(fake.specUpdatedArgsForCall, struct { + }{}) + stub := fake.SpecUpdatedStub + fakeReturns := fake.specUpdatedReturns + fake.recordInvocation("SpecUpdated", []interface{}{}) + fake.specUpdatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) SpecUpdatedCallCount() int { + fake.specUpdatedMutex.RLock() + defer fake.specUpdatedMutex.RUnlock() + return len(fake.specUpdatedArgsForCall) +} + +func (fake *Update) SpecUpdatedCalls(stub func() bool) { + fake.specUpdatedMutex.Lock() + defer fake.specUpdatedMutex.Unlock() + fake.SpecUpdatedStub = stub +} + +func (fake *Update) SpecUpdatedReturns(result1 bool) { + fake.specUpdatedMutex.Lock() + defer fake.specUpdatedMutex.Unlock() + fake.SpecUpdatedStub = nil + fake.specUpdatedReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) SpecUpdatedReturnsOnCall(i int, result1 bool) { + fake.specUpdatedMutex.Lock() + defer fake.specUpdatedMutex.Unlock() + fake.SpecUpdatedStub = nil + if fake.specUpdatedReturnsOnCall == nil { + fake.specUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.specUpdatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) TLSCertEnroll() bool { + fake.tLSCertEnrollMutex.Lock() + ret, specificReturn := fake.tLSCertEnrollReturnsOnCall[len(fake.tLSCertEnrollArgsForCall)] + fake.tLSCertEnrollArgsForCall = append(fake.tLSCertEnrollArgsForCall, struct { + }{}) + stub := fake.TLSCertEnrollStub + fakeReturns := fake.tLSCertEnrollReturns + fake.recordInvocation("TLSCertEnroll", []interface{}{}) + fake.tLSCertEnrollMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) TLSCertEnrollCallCount() int { + fake.tLSCertEnrollMutex.RLock() + defer fake.tLSCertEnrollMutex.RUnlock() + return len(fake.tLSCertEnrollArgsForCall) +} + +func (fake *Update) TLSCertEnrollCalls(stub func() bool) { + fake.tLSCertEnrollMutex.Lock() + defer fake.tLSCertEnrollMutex.Unlock() + fake.TLSCertEnrollStub = stub +} + +func (fake *Update) 
TLSCertEnrollReturns(result1 bool) { + fake.tLSCertEnrollMutex.Lock() + defer fake.tLSCertEnrollMutex.Unlock() + fake.TLSCertEnrollStub = nil + fake.tLSCertEnrollReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) TLSCertEnrollReturnsOnCall(i int, result1 bool) { + fake.tLSCertEnrollMutex.Lock() + defer fake.tLSCertEnrollMutex.Unlock() + fake.TLSCertEnrollStub = nil + if fake.tLSCertEnrollReturnsOnCall == nil { + fake.tLSCertEnrollReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.tLSCertEnrollReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) TLSCertUpdated() bool { + fake.tLSCertUpdatedMutex.Lock() + ret, specificReturn := fake.tLSCertUpdatedReturnsOnCall[len(fake.tLSCertUpdatedArgsForCall)] + fake.tLSCertUpdatedArgsForCall = append(fake.tLSCertUpdatedArgsForCall, struct { + }{}) + stub := fake.TLSCertUpdatedStub + fakeReturns := fake.tLSCertUpdatedReturns + fake.recordInvocation("TLSCertUpdated", []interface{}{}) + fake.tLSCertUpdatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) TLSCertUpdatedCallCount() int { + fake.tLSCertUpdatedMutex.RLock() + defer fake.tLSCertUpdatedMutex.RUnlock() + return len(fake.tLSCertUpdatedArgsForCall) +} + +func (fake *Update) TLSCertUpdatedCalls(stub func() bool) { + fake.tLSCertUpdatedMutex.Lock() + defer fake.tLSCertUpdatedMutex.Unlock() + fake.TLSCertUpdatedStub = stub +} + +func (fake *Update) TLSCertUpdatedReturns(result1 bool) { + fake.tLSCertUpdatedMutex.Lock() + defer fake.tLSCertUpdatedMutex.Unlock() + fake.TLSCertUpdatedStub = nil + fake.tLSCertUpdatedReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) TLSCertUpdatedReturnsOnCall(i int, result1 bool) { + fake.tLSCertUpdatedMutex.Lock() + defer fake.tLSCertUpdatedMutex.Unlock() + fake.TLSCertUpdatedStub = nil + if fake.tLSCertUpdatedReturnsOnCall == nil { + fake.tLSCertUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.tLSCertUpdatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) TLSReenrollNeeded() bool { + fake.tLSReenrollNeededMutex.Lock() + ret, specificReturn := fake.tLSReenrollNeededReturnsOnCall[len(fake.tLSReenrollNeededArgsForCall)] + fake.tLSReenrollNeededArgsForCall = append(fake.tLSReenrollNeededArgsForCall, struct { + }{}) + stub := fake.TLSReenrollNeededStub + fakeReturns := fake.tLSReenrollNeededReturns + fake.recordInvocation("TLSReenrollNeeded", []interface{}{}) + fake.tLSReenrollNeededMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) TLSReenrollNeededCallCount() int { + fake.tLSReenrollNeededMutex.RLock() + defer fake.tLSReenrollNeededMutex.RUnlock() + return len(fake.tLSReenrollNeededArgsForCall) +} + +func (fake *Update) TLSReenrollNeededCalls(stub func() bool) { + fake.tLSReenrollNeededMutex.Lock() + defer fake.tLSReenrollNeededMutex.Unlock() + fake.TLSReenrollNeededStub = stub +} + +func (fake *Update) TLSReenrollNeededReturns(result1 bool) { + fake.tLSReenrollNeededMutex.Lock() + defer fake.tLSReenrollNeededMutex.Unlock() + fake.TLSReenrollNeededStub = nil + fake.tLSReenrollNeededReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) TLSReenrollNeededReturnsOnCall(i int, result1 bool) { + fake.tLSReenrollNeededMutex.Lock() + defer fake.tLSReenrollNeededMutex.Unlock() + fake.TLSReenrollNeededStub = 
nil + if fake.tLSReenrollNeededReturnsOnCall == nil { + fake.tLSReenrollNeededReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.tLSReenrollNeededReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) TLScertNewKeyReenroll() bool { + fake.tLScertNewKeyReenrollMutex.Lock() + ret, specificReturn := fake.tLScertNewKeyReenrollReturnsOnCall[len(fake.tLScertNewKeyReenrollArgsForCall)] + fake.tLScertNewKeyReenrollArgsForCall = append(fake.tLScertNewKeyReenrollArgsForCall, struct { + }{}) + stub := fake.TLScertNewKeyReenrollStub + fakeReturns := fake.tLScertNewKeyReenrollReturns + fake.recordInvocation("TLScertNewKeyReenroll", []interface{}{}) + fake.tLScertNewKeyReenrollMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) TLScertNewKeyReenrollCallCount() int { + fake.tLScertNewKeyReenrollMutex.RLock() + defer fake.tLScertNewKeyReenrollMutex.RUnlock() + return len(fake.tLScertNewKeyReenrollArgsForCall) +} + +func (fake *Update) TLScertNewKeyReenrollCalls(stub func() bool) { + fake.tLScertNewKeyReenrollMutex.Lock() + defer fake.tLScertNewKeyReenrollMutex.Unlock() + fake.TLScertNewKeyReenrollStub = stub +} + +func (fake *Update) TLScertNewKeyReenrollReturns(result1 bool) { + fake.tLScertNewKeyReenrollMutex.Lock() + defer fake.tLScertNewKeyReenrollMutex.Unlock() + fake.TLScertNewKeyReenrollStub = nil + fake.tLScertNewKeyReenrollReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) TLScertNewKeyReenrollReturnsOnCall(i int, result1 bool) { + fake.tLScertNewKeyReenrollMutex.Lock() + defer fake.tLScertNewKeyReenrollMutex.Unlock() + fake.TLScertNewKeyReenrollStub = nil + if fake.tLScertNewKeyReenrollReturnsOnCall == nil { + fake.tLScertNewKeyReenrollReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.tLScertNewKeyReenrollReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) UpgradeDBs() bool { + fake.upgradeDBsMutex.Lock() + ret, specificReturn := fake.upgradeDBsReturnsOnCall[len(fake.upgradeDBsArgsForCall)] + fake.upgradeDBsArgsForCall = append(fake.upgradeDBsArgsForCall, struct { + }{}) + stub := fake.UpgradeDBsStub + fakeReturns := fake.upgradeDBsReturns + fake.recordInvocation("UpgradeDBs", []interface{}{}) + fake.upgradeDBsMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) UpgradeDBsCallCount() int { + fake.upgradeDBsMutex.RLock() + defer fake.upgradeDBsMutex.RUnlock() + return len(fake.upgradeDBsArgsForCall) +} + +func (fake *Update) UpgradeDBsCalls(stub func() bool) { + fake.upgradeDBsMutex.Lock() + defer fake.upgradeDBsMutex.Unlock() + fake.UpgradeDBsStub = stub +} + +func (fake *Update) UpgradeDBsReturns(result1 bool) { + fake.upgradeDBsMutex.Lock() + defer fake.upgradeDBsMutex.Unlock() + fake.UpgradeDBsStub = nil + fake.upgradeDBsReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) UpgradeDBsReturnsOnCall(i int, result1 bool) { + fake.upgradeDBsMutex.Lock() + defer fake.upgradeDBsMutex.Unlock() + fake.UpgradeDBsStub = nil + if fake.upgradeDBsReturnsOnCall == nil { + fake.upgradeDBsReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.upgradeDBsReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + 
fake.certificateCreatedMutex.RLock() + defer fake.certificateCreatedMutex.RUnlock() + fake.certificateUpdatedMutex.RLock() + defer fake.certificateUpdatedMutex.RUnlock() + fake.configOverridesUpdatedMutex.RLock() + defer fake.configOverridesUpdatedMutex.RUnlock() + fake.cryptoBackupNeededMutex.RLock() + defer fake.cryptoBackupNeededMutex.RUnlock() + fake.dindArgsUpdatedMutex.RLock() + defer fake.dindArgsUpdatedMutex.RUnlock() + fake.ecertEnrollMutex.RLock() + defer fake.ecertEnrollMutex.RUnlock() + fake.ecertNewKeyReenrollMutex.RLock() + defer fake.ecertNewKeyReenrollMutex.RUnlock() + fake.ecertReenrollNeededMutex.RLock() + defer fake.ecertReenrollNeededMutex.RUnlock() + fake.ecertUpdatedMutex.RLock() + defer fake.ecertUpdatedMutex.RUnlock() + fake.fabricVersionUpdatedMutex.RLock() + defer fake.fabricVersionUpdatedMutex.RUnlock() + fake.getCreatedCertTypeMutex.RLock() + defer fake.getCreatedCertTypeMutex.RUnlock() + fake.imagesUpdatedMutex.RLock() + defer fake.imagesUpdatedMutex.RUnlock() + fake.mSPUpdatedMutex.RLock() + defer fake.mSPUpdatedMutex.RUnlock() + fake.migrateToV2Mutex.RLock() + defer fake.migrateToV2Mutex.RUnlock() + fake.migrateToV24Mutex.RLock() + defer fake.migrateToV24Mutex.RUnlock() + fake.nodeOUUpdatedMutex.RLock() + defer fake.nodeOUUpdatedMutex.RUnlock() + fake.peerTagUpdatedMutex.RLock() + defer fake.peerTagUpdatedMutex.RUnlock() + fake.restartNeededMutex.RLock() + defer fake.restartNeededMutex.RUnlock() + fake.setDindArgsUpdatedMutex.RLock() + defer fake.setDindArgsUpdatedMutex.RUnlock() + fake.specUpdatedMutex.RLock() + defer fake.specUpdatedMutex.RUnlock() + fake.tLSCertEnrollMutex.RLock() + defer fake.tLSCertEnrollMutex.RUnlock() + fake.tLSCertUpdatedMutex.RLock() + defer fake.tLSCertUpdatedMutex.RUnlock() + fake.tLSReenrollNeededMutex.RLock() + defer fake.tLSReenrollNeededMutex.RUnlock() + fake.tLScertNewKeyReenrollMutex.RLock() + defer fake.tLScertNewKeyReenrollMutex.RUnlock() + fake.upgradeDBsMutex.RLock() + defer fake.upgradeDBsMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *Update) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ basepeer.Update = new(Update) diff --git a/pkg/offering/base/peer/override/deployment.go b/pkg/offering/base/peer/override/deployment.go new file mode 100644 index 00000000..a9da9fb6 --- /dev/null +++ b/pkg/offering/base/peer/override/deployment.go @@ -0,0 +1,971 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package override + +import ( + "context" + "encoding/json" + "fmt" + "path/filepath" + "strings" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/container" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/deployment" + dep "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/deployment" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/serviceaccount" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + "github.com/IBM-Blockchain/fabric-operator/pkg/util/image" + "github.com/IBM-Blockchain/fabric-operator/version" + "github.com/pkg/errors" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +// Container names +const ( + INIT = "init" + PEER = "peer" + DIND = "dind" + PROXY = "proxy" + FLUENTD = "chaincode-logs" + COUCHDB = "couchdb" + COUCHDBINIT = "couchdbinit" + CCLAUNCHER = "chaincode-launcher" + HSMCLIENT = "hsm-client" +) + +type CoreConfig interface { + UsingPKCS11() bool +} + +func (o *Override) Deployment(object v1.Object, deployment *appsv1.Deployment, action resources.Action) error { + instance := object.(*current.IBPPeer) + switch action { + case resources.Create: + return o.CreateDeployment(instance, deployment) + case resources.Update: + return o.UpdateDeployment(instance, deployment) + } + + return nil +} + +func (o *Override) CreateDeployment(instance *current.IBPPeer, k8sDep *appsv1.Deployment) error { + var err error + name := instance.GetName() + + if !instance.Spec.License.Accept { + return errors.New("user must accept license before continuing") + } + + mspID := instance.Spec.MSPID + if mspID == "" { + return errors.New("failed to provide MSP ID for peer") + } + + deployment := dep.New(k8sDep) + initContainer, err := deployment.GetContainer(INIT) + if err != nil { + return errors.New("init container not found in deployment spec") + } + peerContainer, err := deployment.GetContainer(PEER) + if err != nil { + return errors.New("peer container not found in deployment spec") + } + grpcwebContainer, err := deployment.GetContainer(PROXY) + if err != nil { + return errors.New("grpc container not found in deployment spec") + } + + stateDB := instance.Spec.StateDb + if instance.UsingCouchDB() { + if !deployment.ContainerExists(COUCHDB) { // If coucdb container exists, don't need to create it again + stateDB = "CouchDB" + err = o.CreateCouchDBContainers(instance, deployment) + if err != nil { + return err + } + } + } else if instance.Spec.UsingLevelDB() { + stateDB = "goleveldb" + + peerContainer.AppendVolumeMountWithSubPathIfMissing("db-data", "/data/peer/ledgersData/stateLeveldb/", "data") + initContainer.AppendVolumeMountWithSubPathIfMissing("db-data", "/data/peer/ledgersData/stateLeveldb/", "data") + + deployment.UpdateContainer(peerContainer) + deployment.UpdateInitContainer(initContainer) + } else { + return errors.New("unsupported StateDB type") + } + + err = o.CommonDeploymentOverrides(instance, deployment) + if err != nil { + return err + } + + // At this point we know init, peer, and proxy containers exists. 
+ // Can use MustGetContainer to avoid handling error + peerContainer = deployment.MustGetContainer(PEER) + grpcwebContainer = deployment.MustGetContainer(PROXY) + + deployment.SetImagePullSecrets(instance.Spec.ImagePullSecrets) + deployment.SetServiceAccountName(serviceaccount.GetName(name)) + deployment.SetAffinity(o.GetAffinity(instance)) + + peerContainer.AppendEnvIfMissing("CORE_PEER_ID", instance.Name) + peerContainer.AppendEnvIfMissing("CORE_PEER_LOCALMSPID", mspID) + + claimName := instance.Name + "-statedb-pvc" + if instance.Spec.CustomNames.PVC.StateDB != "" { + claimName = instance.Spec.CustomNames.PVC.StateDB + } + deployment.AppendPVCVolumeIfMissing("db-data", claimName) + + peerContainer.AppendEnvIfMissing("CORE_LEDGER_STATE_STATEDATABASE", stateDB) + + claimName = instance.Name + "-pvc" + if instance.Spec.CustomNames.PVC.Peer != "" { + claimName = instance.Spec.CustomNames.PVC.Peer + } + deployment.AppendPVCVolumeIfMissing("fabric-peer-0", claimName) + + deployment.AppendConfigMapVolumeIfMissing("fluentd-config", instance.Name+"-fluentd") + + ecertintercertSecret := fmt.Sprintf("ecert-%s-intercerts", instance.Name) + tlsintercertSecret := fmt.Sprintf("tls-%s-intercerts", instance.Name) + secretName := fmt.Sprintf("tls-%s-cacerts", instance.Name) + // Check if intermediate ecerts exists + if util.IntermediateSecretExists(o.Client, instance.Namespace, ecertintercertSecret) { + peerContainer.AppendVolumeMountIfMissing("ecert-intercerts", "/certs/msp/intermediatecerts") + deployment.AppendSecretVolumeIfMissing("ecert-intercerts", ecertintercertSecret) + } + + // Check if intermediate tlscerts exists + if util.IntermediateSecretExists(o.Client, instance.Namespace, tlsintercertSecret) { + peerContainer.AppendVolumeMountIfMissing("tls-intercerts", "/certs/msp/tlsintermediatecerts") + deployment.AppendSecretVolumeIfMissing("tls-intercerts", tlsintercertSecret) + } + + tlsCACertsSecret, err := o.GetTLSCACertsSecret(instance, secretName) + if err != nil { + return err + } + + var certsData string + count := 0 + for key, _ := range tlsCACertsSecret.Data { + v := fmt.Sprintf("/certs/msp/tlscacerts/%s", key) + if count == 0 { + certsData = certsData + v + } else { + certsData = certsData + " " + v + } + count++ + } + peerContainer.AppendEnvIfMissingOverrideIfPresent("CORE_OPERATIONS_TLS_CLIENTROOTCAS_FILES", certsData) + peerContainer.AppendEnvIfMissingOverrideIfPresent("CORE_PEER_TLS_ROOTCERT_FILE", certsData) + grpcwebContainer.AppendEnvIfMissingOverrideIfPresent("SERVER_TLS_CLIENT_CA_FILES", certsData) + peerContainer.AppendEnvIfMissingOverrideIfPresent("CORE_PEER_TLS_ROOTCERT_FILE", certsData) + + // Check if intermediate tlscerts exists + if util.IntermediateSecretExists(o.Client, instance.Namespace, tlsintercertSecret) { + secretName := fmt.Sprintf("tls-%s-intercerts", instance.Name) + tlsCAInterCertsSecret, err := o.GetTLSCACertsSecret(instance, secretName) + if err != nil { + return err + } + + var certsData string + count := 0 + for key, _ := range tlsCAInterCertsSecret.Data { + v := fmt.Sprintf("/certs/msp/tlsintermediatecerts/%s", key) + if count == 0 { + certsData = certsData + v + } else { + certsData = certsData + " " + v + } + count++ + } + peerContainer.AppendEnvIfMissingOverrideIfPresent("CORE_PEER_TLS_ROOTCERT_FILE", certsData) + } + + if o.AdminSecretExists(instance) { + deployment.AppendSecretVolumeIfMissing("ecert-admincerts", fmt.Sprintf("ecert-%s-admincerts", instance.Name)) + peerContainer.AppendVolumeMountIfMissing("ecert-admincerts", "/certs/msp/admincerts") + } 
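+ // The config override below is asserted to the local CoreConfig interface so that only UsingPKCS11()
+ // needs to be answered here; when PKCS11/HSM is in use the ecert keystore secret is not mounted,
+ // since the signing key is expected to live in the HSM rather than in a Kubernetes secret.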
+ + co, err := instance.GetConfigOverride() + if err != nil { + return errors.Wrap(err, "failed to get configoverride") + } + + configOverride := co.(CoreConfig) + if !configOverride.UsingPKCS11() { + deployment.AppendSecretVolumeIfMissing("ecert-keystore", fmt.Sprintf("ecert-%s-keystore", instance.Name)) + peerContainer.AppendVolumeMountIfMissing("ecert-keystore", "/certs/msp/keystore") + } + + deployment.AppendSecretVolumeIfMissing("ecert-cacerts", fmt.Sprintf("ecert-%s-cacerts", instance.Name)) + deployment.AppendSecretVolumeIfMissing("ecert-signcert", fmt.Sprintf("ecert-%s-signcert", instance.Name)) + deployment.AppendSecretVolumeIfMissing("tls-cacerts", fmt.Sprintf("tls-%s-cacerts", instance.Name)) + deployment.AppendSecretVolumeIfMissing("tls-keystore", fmt.Sprintf("tls-%s-keystore", instance.Name)) + deployment.AppendSecretVolumeIfMissing("tls-signcert", fmt.Sprintf("tls-%s-signcert", instance.Name)) + + if o.OrdererCACertsSecretExists(instance) { + deployment.AppendSecretVolumeIfMissing("orderercacerts", fmt.Sprintf("%s-orderercacerts", instance.Name)) + peerContainer.AppendVolumeMountIfMissing("orderercacerts", "/orderer/certs") + } + + deployment.AppendConfigMapVolumeIfMissing("peer-config", instance.Name+"-config") + + secret := &corev1.Secret{} + err = o.Client.Get( + context.TODO(), + types.NamespacedName{Name: instance.GetName() + "-secret", Namespace: instance.GetNamespace()}, + secret, + ) + if err == nil { + peerContainer.AppendEnvIfMissing("RESTART_OLD_RESOURCEVER", secret.ObjectMeta.ResourceVersion) + } + + deployment.UpdateContainer(grpcwebContainer) + + if instance.UsingHSMProxy() { + peerContainer.AppendEnvIfMissing("PKCS11_PROXY_SOCKET", instance.Spec.HSM.PKCS11Endpoint) + } else if instance.IsHSMEnabled() { + deployment.AppendVolumeIfMissing(corev1.Volume{ + Name: "shared", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{ + Medium: corev1.StorageMediumMemory, + }, + }, + }) + + hsmConfig, err := config.ReadHSMConfig(o.Client, instance) + if err != nil { + return err + } + + hsmSettings(instance, hsmConfig, peerContainer, deployment) + + deployment.UpdateContainer(peerContainer) + } + + if version.GetMajorReleaseVersion(instance.Spec.FabricVersion) == version.V2 { + err = o.V2Deployment(instance, deployment) + if err != nil { + return errors.Wrap(err, "failed during V2 peer deployment overrides") + } + peerVersion := version.String(instance.Spec.FabricVersion) + if peerVersion.EqualWithoutTag(version.V2_4_1) || peerVersion.GreaterThan(version.V2_4_1) { + err = o.V24Deployment(instance, deployment) + if err != nil { + return errors.Wrap(err, "failed during V24 peer deployment overrides") + } + } + } else { + err = o.V1Deployment(instance, deployment) + if err != nil { + return errors.Wrap(err, "failed during V1 peer deployment overrides") + } + } + + return nil +} + +func (o *Override) V1Deployment(instance *current.IBPPeer, deployment *dep.Deployment) error { + initContainer := deployment.MustGetContainer(INIT) + + // NOTE: The container doesn't like when these bash commands are listed as separate strings in the command field + // which is why the command has been formatted into a single string. + // + // This command checks the permissions, owner, and group of /data/ and runs chmod/chown on required dirs if they + // have yet to be set to the default values (775, 1000, and 1000 respectively). + // + // cmdFormat is a format string that configured with the list of directories when used. 
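+ // For example, in the CouchDB case only "/data/" is substituted, so the rendered command ends with
+ // roughly: `if [ ... ]; then chmod -R ${DEFAULT_PERM} /data/ && chown -R -H ${DEFAULT_USER}:${DEFAULT_GROUP} /data/; fi`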
+ cmdFormat := "DEFAULT_PERM=775 && DEFAULT_USER=7051 && DEFAULT_GROUP=1000 " + cmdFormat += `&& PERM=$(stat -c "%%a" /data/) && USER=$(stat -c "%%u" /data/) && GROUP=$(stat -c "%%g" /data/) ` // %% is used to escape the percent symbol + cmdFormat += `&& if [ ${PERM} != ${DEFAULT_PERM} ] || [ ${USER} != ${DEFAULT_USER} ] || [ ${GROUP} != ${DEFAULT_GROUP} ]; ` + cmdFormat += `then chmod -R ${DEFAULT_PERM} %[1]s && chown -R -H ${DEFAULT_USER}:${DEFAULT_GROUP} %[1]s; fi` + + // NOTE: There are two chmod & chown calls for /data/ and /data/peer/... because + // those are two separate pvc mounts, so we were running the command for both the locations. + if instance.UsingCouchDB() { + directories := "/data/" + cmd := fmt.Sprintf(cmdFormat, directories) + initContainer.SetCommand([]string{ + "bash", + "-c", + cmd, + }) + } else { + directories := "/{data/,data/peer/ledgersData/stateLeveldb}" + cmd := fmt.Sprintf(cmdFormat, directories) + initContainer.SetCommand([]string{ + "bash", + "-c", + cmd, + }) + } + + fluentdContainer, err := deployment.GetContainer(FLUENTD) + if err != nil { + return errors.New("fluentD container not found in deployment") + } + + dindContainer, err := deployment.GetContainer(DIND) + if err != nil { + return errors.New("dind container not found in deployment") + } + + dindargs := instance.Spec.DindArgs + if dindargs == nil { + dindargs = []string{"--log-driver", "fluentd", "--log-opt", "fluentd-address=localhost:9880", "--mtu", "1400"} + } + dindContainer.SetArgs(dindargs) + + image := instance.Spec.Images + if image != nil { + dindContainer.SetImage(image.DindImage, image.DindTag) + fluentdContainer.SetImage(image.FluentdImage, image.FluentdTag) + } + + resourcesRequest := instance.Spec.Resources + if resourcesRequest != nil { + if resourcesRequest.DinD != nil { + err = dindContainer.UpdateResources(resourcesRequest.DinD) + if err != nil { + return errors.Wrap(err, "resource update for dind failed") + } + } + + if resourcesRequest.FluentD != nil { + err = fluentdContainer.UpdateResources(resourcesRequest.FluentD) + if err != nil { + return errors.Wrap(err, "resource update for fluentd failed") + } + } + } + + peerContainer := deployment.MustGetContainer(PEER) + // env vars only required for 1.x peer + peerContainer.AppendEnvIfMissing("CORE_VM_ENDPOINT", "localhost:2375") + peerContainer.AppendEnvIfMissing("CORE_CHAINCODE_GOLANG_RUNTIME", "golangruntime:latest") + peerContainer.AppendEnvIfMissing("CORE_CHAINCODE_CAR_RUNTIME", "carruntime:latest") + peerContainer.AppendEnvIfMissing("CORE_CHAINCODE_JAVA_RUNTIME", "javaruntime:latest") + peerContainer.AppendEnvIfMissing("CORE_CHAINCODE_NODE_RUNTIME", "noderuntime:latest") + peerContainer.AppendEnvIfMissing("CORE_CHAINCODE_BUILDER", "builder:latest") + peerContainer.AppendEnvIfMissing("CORE_CHAINCODE_GOLANG_DYNAMICLINK", "true") + peerContainer.AppendEnvIfMissing("CORE_VM_DOCKER_ATTACHSTDOUT", "false") + + deployment.UpdateInitContainer(initContainer) + deployment.UpdateContainer(fluentdContainer) + deployment.UpdateContainer(dindContainer) + deployment.UpdateContainer(peerContainer) + return nil +} + +func (o *Override) V2Deployment(instance *current.IBPPeer, deployment *dep.Deployment) error { + + initContainer := deployment.MustGetContainer(INIT) + peerContainer := deployment.MustGetContainer(PEER) + + // NOTE: The container doesn't like when these bash commands are listed as separate strings in the command field + // which is why the command has been formatted into a single string. 
+ // + // This command checks the permissions, owner, and group of /data/ and runs chmod/chown on required dirs if they + // have yet to be set to the default values (775, 1000, and 1000 respectively). + // + // cmdFormat is a format string that configured with the list of directories when used. + cmdFormat := "DEFAULT_PERM=775 && DEFAULT_USER=7051 && DEFAULT_GROUP=1000 " + cmdFormat += `&& PERM=$(stat -c "%%a" /data/) && USER=$(stat -c "%%u" /data/) && GROUP=$(stat -c "%%g" /data/) ` // %% is used to escape the percent symbol + cmdFormat += `&& if [ ${PERM} != ${DEFAULT_PERM} ] || [ ${USER} != ${DEFAULT_USER} ] || [ ${GROUP} != ${DEFAULT_GROUP} ]; ` + cmdFormat += `then chmod -R ${DEFAULT_PERM} %[1]s && chown -R -H ${DEFAULT_USER}:${DEFAULT_GROUP} %[1]s; fi` + + // NOTE: There are multiple chmod & chown calls for /data/ and /data/peer/... and /cclauncher because + // those are separate pvc mounts, so we were running the command for all the locations + dirs := []string{"data/"} + if !instance.UsingCouchDB() { + dirs = append(dirs, "data/peer/ledgersData/stateLeveldb") + } + if instance.UsingCCLauncherImage() { + dirs = append(dirs, "cclauncher/") + } + + var directories string + if len(dirs) > 1 { + directories = fmt.Sprintf("/{%s}", strings.Join(dirs, ",")) + } else { + directories = "/data/" + } + + initContainer.SetCommand([]string{ + "bash", + "-c", + fmt.Sprintf(cmdFormat, directories), + }) + + if instance.UsingCCLauncherImage() { + err := o.CreateCCLauncherContainer(instance, deployment) + if err != nil { + return errors.Wrap(err, "failed to create chaincode launcher container") + } + + volumeMountName := fmt.Sprintf("%s-cclauncher", instance.GetName()) + initContainer.AppendVolumeMountIfMissing(volumeMountName, "/cclauncher") + peerContainer.AppendVolumeMountIfMissing(volumeMountName, "/cclauncher") + + peerContainer.AppendEnvIfMissing("IBP_BUILDER_SHARED_DIR", "/cclauncher") + peerContainer.AppendEnvIfMissing("IBP_BUILDER_ENDPOINT", "127.0.0.1:11111") + peerContainer.AppendEnvIfMissing("PEER_NAME", instance.GetName()) + + // Will delete these envs if found, these are not required for v2 + peerContainer.DeleteEnv("CORE_VM_ENDPOINT") + peerContainer.DeleteEnv("CORE_CHAINCODE_GOLANG_RUNTIME") + peerContainer.DeleteEnv("CORE_CHAINCODE_CAR_RUNTIME") + peerContainer.DeleteEnv("CORE_CHAINCODE_JAVA_RUNTIME") + peerContainer.DeleteEnv("CORE_CHAINCODE_NODE_RUNTIME") + peerContainer.DeleteEnv("CORE_CHAINCODE_BUILDER") + peerContainer.DeleteEnv("CORE_CHAINCODE_GOLANG_DYNAMICLINK") + peerContainer.DeleteEnv("CORE_VM_DOCKER_ATTACHSTDOUT") + + deployment.AppendEmptyDirVolumeIfMissing(fmt.Sprintf("%s-cclauncher", instance.Name), corev1.StorageMediumMemory) + } + + // Append a k/v JSON substitution map to the peer env. 
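+ // For example, a (hypothetical) builder config of {"peername": "org1peer1"} in the spec is marshalled
+ // to JSON unchanged and handed to the chaincode-as-a-service builder through the
+ // CHAINCODE_AS_A_SERVICE_BUILDER_CONFIG environment variable set below.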
+ if instance.Spec.ChaincodeBuilderConfig != nil { + configJSON, err := json.Marshal(instance.Spec.ChaincodeBuilderConfig) + if err != nil { + return errors.Wrapf(err, "failed to marshal chaincode builder config '%s',", instance.Spec.ChaincodeBuilderConfig) + } + peerContainer.AppendEnvIfMissing("CHAINCODE_AS_A_SERVICE_BUILDER_CONFIG", string(configJSON)) + } + + deployment.UpdateInitContainer(initContainer) + deployment.UpdateContainer(peerContainer) + deployment.RemoveContainer(FLUENTD) + deployment.RemoveContainer(DIND) + return nil +} + +func (o *Override) V24Deployment(instance *current.IBPPeer, deployment *dep.Deployment) error { + if instance.UsingCCLauncherImage() { + launcherContainer := deployment.MustGetContainer(CCLAUNCHER) + + launcherContainer.LivenessProbe.HTTPGet.Scheme = corev1.URISchemeHTTPS + launcherContainer.ReadinessProbe.HTTPGet.Scheme = corev1.URISchemeHTTPS + deployment.UpdateContainer(launcherContainer) + } + return nil +} + +func (o *Override) V2DeploymentUpdate(instance *current.IBPPeer, deployment *dep.Deployment) error { + peerContainer, err := deployment.GetContainer(PEER) + if err != nil { + return err + } + peerContainer.AppendEnvIfMissing("PEER_NAME", instance.GetName()) + + // For V2Deployments using chaincode-as-a-service and external builders, there is no need to include + // or modify the chaincode launcher sidecar. + if !instance.UsingCCLauncherImage() { + if err := o.V2Deployment(instance, deployment); err != nil { + return err + } + return nil + } + + // V2DeploymentUpdate will be triggered when migrating from v1 to v2 peer, during this update we might + // have to run initialization logic for a v2 fabric deployment. If the chaincode launcher container is + // not found, we try to initialize the deployment based on v2 deployment to add chaincode launcher + // before continuing with the remaining update logic. Not ideal, but until a bigger refactor is performed + // this is the least intrusive way to handle this. 
+ ccLauncherContainer, err := deployment.GetContainer(CCLAUNCHER) + if err != nil { + if err := o.V2Deployment(instance, deployment); err != nil { + return err + } + return nil + } + + ccLauncherContainer = deployment.MustGetContainer(CCLAUNCHER) + images := instance.Spec.Images + if images != nil { + if images.CCLauncherImage != "" && images.CCLauncherTag != "" { + ccLauncherContainer.SetImage(images.CCLauncherImage, images.CCLauncherTag) + } + + ccLauncherContainer.AppendEnvIfMissingOverrideIfPresent( + "FILETRANSFERIMAGE", image.Format(instance.Spec.Images.PeerInitImage, instance.Spec.Images.PeerInitTag), + ) + ccLauncherContainer.AppendEnvIfMissingOverrideIfPresent( + "BUILDERIMAGE", image.Format(instance.Spec.Images.BuilderImage, instance.Spec.Images.BuilderTag), + ) + ccLauncherContainer.AppendEnvIfMissingOverrideIfPresent( + "GOENVIMAGE", image.Format(instance.Spec.Images.GoEnvImage, instance.Spec.Images.GoEnvTag), + ) + ccLauncherContainer.AppendEnvIfMissingOverrideIfPresent( + "JAVAENVIMAGE", image.Format(instance.Spec.Images.JavaEnvImage, instance.Spec.Images.JavaEnvTag), + ) + ccLauncherContainer.AppendEnvIfMissingOverrideIfPresent( + "NODEENVIMAGE", image.Format(instance.Spec.Images.NodeEnvImage, instance.Spec.Images.NodeEnvTag), + ) + ccLauncherContainer.AppendEnvIfMissing("CORE_PEER_LOCALMSPID", instance.Spec.MSPID) + } + + resourcesRequest := instance.Spec.Resources + if resourcesRequest != nil { + if resourcesRequest.CCLauncher != nil { + err := ccLauncherContainer.UpdateResources(resourcesRequest.CCLauncher) + if err != nil { + return errors.Wrap(err, "resource update for cclauncher failed") + } + } + } + + return nil +} + +func (o *Override) V24DeploymentUpdate(instance *current.IBPPeer, deployment *dep.Deployment) error { + if instance.UsingCCLauncherImage() { + ccLauncherContainer, err := deployment.GetContainer(CCLAUNCHER) + if err != nil { + return err + } + ccLauncherContainer.LivenessProbe.HTTPGet.Scheme = corev1.URISchemeHTTPS + ccLauncherContainer.ReadinessProbe.HTTPGet.Scheme = corev1.URISchemeHTTPS + + deployment.UpdateContainer(ccLauncherContainer) + } + return nil +} + +func (o *Override) CreateCCLauncherContainer(instance *current.IBPPeer, deployment *dep.Deployment) error { + ccLauncherContainer, err := container.LoadFromFile(o.DefaultCCLauncherFile) + if err != nil { + return errors.Wrap(err, "failed to read default chaincode launcher container file") + } + + images := instance.Spec.Images + if images == nil || images.CCLauncherImage == "" { + return errors.New("no image specified for chaincode launcher") + } + + ccLauncherContainer.SetImage(images.CCLauncherImage, images.CCLauncherTag) + ccLauncherContainer.AppendEnvIfMissing("KUBE_NAMESPACE", instance.GetNamespace()) + ccLauncherContainer.AppendEnvIfMissing("SHARED_VOLUME_PATH", "/cclauncher") + ccLauncherContainer.AppendEnvIfMissing("IMAGEPULLSECRETS", strings.Join(instance.Spec.ImagePullSecrets, " ")) + ccLauncherContainer.AppendEnvIfMissing("CORE_PEER_LOCALMSPID", instance.Spec.MSPID) + + valueFrom := &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, + } + ccLauncherContainer.AppendEnvVarValueFromIfMissing("PEER_POD_NAME", valueFrom) + + valueFrom = &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.uid", + }, + } + ccLauncherContainer.AppendEnvVarValueFromIfMissing("PEER_POD_UID", valueFrom) + + ccLauncherContainer.AppendEnvIfMissingOverrideIfPresent("FILETRANSFERIMAGE", image.Format(instance.Spec.Images.PeerInitImage, 
instance.Spec.Images.PeerInitTag)) + ccLauncherContainer.AppendEnvIfMissingOverrideIfPresent("BUILDERIMAGE", image.Format(instance.Spec.Images.BuilderImage, instance.Spec.Images.BuilderTag)) + ccLauncherContainer.AppendEnvIfMissingOverrideIfPresent("GOENVIMAGE", image.Format(instance.Spec.Images.GoEnvImage, instance.Spec.Images.GoEnvTag)) + ccLauncherContainer.AppendEnvIfMissingOverrideIfPresent("JAVAENVIMAGE", image.Format(instance.Spec.Images.JavaEnvImage, instance.Spec.Images.JavaEnvTag)) + ccLauncherContainer.AppendEnvIfMissingOverrideIfPresent("NODEENVIMAGE", image.Format(instance.Spec.Images.NodeEnvImage, instance.Spec.Images.NodeEnvTag)) + ccLauncherContainer.AppendEnvIfMissingOverrideIfPresent("PEER_ID", instance.GetName()) + ccLauncherContainer.AppendVolumeMountIfMissing(fmt.Sprintf("%s-cclauncher", instance.Name), "/cclauncher") + + resourcesRequest := instance.Spec.Resources + if resourcesRequest != nil { + if resourcesRequest.CCLauncher != nil { + err = ccLauncherContainer.UpdateResources(resourcesRequest.CCLauncher) + if err != nil { + return errors.Wrap(err, "resource update for cclauncher failed") + } + } + } + + deployment.AddContainer(*ccLauncherContainer) + return nil +} + +func (o *Override) UpdateDeployment(instance *current.IBPPeer, k8sDep *appsv1.Deployment) error { + deployment := dep.New(k8sDep) + err := o.CommonDeploymentOverrides(instance, deployment) + if err != nil { + return err + } + + switch version.GetMajorReleaseVersion(instance.Spec.FabricVersion) { + case version.V1: + err = o.V1Deployment(instance, deployment) + if err != nil { + return errors.Wrap(err, "failed during V1 peer deployment overrides") + } + case version.V2: + err := o.V2DeploymentUpdate(instance, deployment) + if err != nil { + return errors.Wrapf(err, "failed to update V2 fabric deployment for instance '%s'", instance.GetName()) + } + peerVersion := version.String(instance.Spec.FabricVersion) + if peerVersion.EqualWithoutTag(version.V2_4_1) || peerVersion.GreaterThan(version.V2_4_1) { + err := o.V24DeploymentUpdate(instance, deployment) + if err != nil { + return errors.Wrapf(err, "failed to update V24 fabric deployment for instance '%s'", instance.GetName()) + } + } + } + + if instance.UsingCouchDB() { + couchdb := deployment.MustGetContainer(COUCHDB) + + image := instance.Spec.Images + if image != nil { + couchdb.SetImage(image.CouchDBImage, image.CouchDBTag) + } + + couchdb.AppendEnvIfMissing("SKIP_PERMISSIONS_UPDATE", "true") + } + + if instance.UsingHSMProxy() { + peerContainer := deployment.MustGetContainer(PEER) + peerContainer.UpdateEnv("PKCS11_PROXY_SOCKET", instance.Spec.HSM.PKCS11Endpoint) + deployment.UpdateContainer(peerContainer) + } else if instance.IsHSMEnabled() { + hsmInitCont := deployment.MustGetContainer(HSMCLIENT) + image := instance.Spec.Images + if image != nil { + hsmInitCont.SetImage(image.HSMImage, image.HSMTag) + } + } + + return nil +} + +func (o *Override) CommonDeploymentOverrides(instance *current.IBPPeer, deployment *dep.Deployment) error { + initContainer := deployment.MustGetContainer(INIT) + peerContainer := deployment.MustGetContainer(PEER) + grpcContainer, err := deployment.GetContainer(PROXY) + if err != nil { + return errors.New("proxy container not found in deployment spec") + } + + image := instance.Spec.Images + if image != nil { + initContainer.SetImage(image.PeerInitImage, image.PeerInitTag) + peerContainer.SetImage(image.PeerImage, image.PeerTag) + grpcContainer.SetImage(image.GRPCWebImage, image.GRPCWebTag) + + if instance.UsingCouchDB() { + 
couchdb := deployment.MustGetContainer(COUCHDB) + couchdb.SetImage(image.CouchDBImage, image.CouchDBTag) + + couchdbInitContainer := deployment.MustGetContainer(COUCHDBINIT) + couchdbInitContainer.SetImage(image.PeerInitImage, image.PeerInitTag) + } + } + + resourcesRequest := instance.Spec.Resources + if resourcesRequest != nil { + if resourcesRequest.Peer != nil { + err = peerContainer.UpdateResources(resourcesRequest.Peer) + if err != nil { + return errors.Wrap(err, "resource update for peer failed") + } + } + + if resourcesRequest.GRPCProxy != nil { + err = grpcContainer.UpdateResources(resourcesRequest.GRPCProxy) + if err != nil { + return errors.Wrap(err, "resource update for grpcproxy failed") + } + } + + if instance.UsingCouchDB() { + couchdb := deployment.MustGetContainer(COUCHDB) + if resourcesRequest.CouchDB != nil { + err = couchdb.UpdateResources(resourcesRequest.CouchDB) + if err != nil { + return errors.Wrap(err, "resource update for couchdb failed") + } + } + + couchdbinit := deployment.MustGetContainer(COUCHDBINIT) + if resourcesRequest.Init != nil { + err = couchdbinit.UpdateResources(resourcesRequest.Init) + if err != nil { + return errors.Wrap(err, "resource update for couchdb init failed") + } + } + } + } + + externalAddress := instance.Spec.PeerExternalEndpoint + if externalAddress != "" { + peerContainer.AppendEnvIfMissing("CORE_PEER_GOSSIP_EXTERNALENDPOINT", externalAddress) + peerContainer.AppendEnvIfMissing("CORE_PEER_GOSSIP_ENDPOINT", externalAddress) + grpcContainer.AppendEnvIfMissing("EXTERNAL_ADDRESS", externalAddress) + } + + if instance.Spec.Replicas != nil { + if *instance.Spec.Replicas > 1 { + return errors.New("replicas > 1 not allowed in IBPPeer") + } + deployment.SetReplicas(instance.Spec.Replicas) + } + + deployment.UpdateContainer(peerContainer) + deployment.UpdateContainer(grpcContainer) + return nil +} + +func (o *Override) CreateCouchDBContainers(instance *current.IBPPeer, deployment *dep.Deployment) error { + couchdbUser := o.CouchdbUser + if couchdbUser == "" { + couchdbUser = util.GenerateRandomString(32) + } + + couchdbPassword := o.CouchdbPassword + if couchdbPassword == "" { + couchdbPassword = util.GenerateRandomString(32) + } + + couchdbContainer, err := container.LoadFromFile(o.DefaultCouchContainerFile) + if err != nil { + return errors.Wrap(err, "failed to read default couch container file") + } + + couchdbInitContainer, err := container.LoadFromFile(o.DefaultCouchInitContainerFile) + if err != nil { + return errors.Wrap(err, "failed to read default couch init container file") + } + + image := instance.Spec.Images + if image != nil { + couchdbContainer.SetImage(image.CouchDBImage, image.CouchDBTag) + couchdbInitContainer.SetImage(image.PeerInitImage, image.PeerInitTag) + } + + couchdbContainer.AppendEnvIfMissing("COUCHDB_USER", couchdbUser) + couchdbContainer.AppendEnvIfMissing("COUCHDB_PASSWORD", couchdbPassword) + couchdbContainer.AppendEnvIfMissing("SKIP_PERMISSIONS_UPDATE", "true") + + peerContainer := deployment.MustGetContainer(PEER) + peerContainer.AppendEnvIfMissing("CORE_LEDGER_STATE_COUCHDBCONFIG_USERNAME", couchdbUser) + peerContainer.AppendEnvIfMissing("CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD", couchdbPassword) + peerContainer.AppendEnvIfMissing("CORE_LEDGER_STATE_COUCHDBCONFIG_COUCHDBADDRESS", "localhost:5984") + peerContainer.AppendEnvIfMissing("CORE_LEDGER_STATE_COUCHDBCONFIG_MAXRETRIESONSTARTUP", "20") + + deployment.AddContainer(*couchdbContainer) + deployment.AddInitContainer(*couchdbInitContainer) + 
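+ // The same CouchDB credentials (taken from the override, or randomly generated when unset) are injected
+ // into both the CouchDB container and the peer container above, so the peer can authenticate to its local state database.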
deployment.UpdateContainer(peerContainer) + + return nil +} + +func (o *Override) GetAffinity(instance *current.IBPPeer) *corev1.Affinity { + arch := instance.Spec.Arch + zone := instance.Spec.Zone + region := instance.Spec.Region + nodeSelectorTerms := common.GetNodeSelectorTerms(arch, zone, region) + + orgName := instance.Spec.MSPID + podAntiAffinity := common.GetPodAntiAffinity(orgName) + + affinity := &corev1.Affinity{ + PodAntiAffinity: podAntiAffinity, + } + + if len(nodeSelectorTerms[0].MatchExpressions) != 0 { + affinity.NodeAffinity = &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: nodeSelectorTerms, + }, + } + } + + return affinity +} + +func (o *Override) AdminSecretExists(instance *current.IBPPeer) bool { + secret := &corev1.Secret{} + err := o.Client.Get(context.TODO(), types.NamespacedName{ + Name: fmt.Sprintf("ecert-%s-admincerts", instance.Name), + Namespace: instance.Namespace}, secret) + if err != nil { + return false + } + + return true +} + +func (o *Override) OrdererCACertsSecretExists(instance *current.IBPPeer) bool { + err := o.Client.Get(context.TODO(), types.NamespacedName{ + Name: fmt.Sprintf("%s-orderercacerts", instance.Name), + Namespace: instance.Namespace}, &corev1.Secret{}) + if err != nil { + return false + } + + return true +} + +func (o *Override) GetTLSCACertsSecret(instance *current.IBPPeer, secretName string) (*corev1.Secret, error) { + secret := &corev1.Secret{} + err := o.Client.Get(context.TODO(), types.NamespacedName{ + Name: secretName, + Namespace: instance.Namespace}, secret) + if err != nil { + } + + return secret, nil +} + +func hsmInitContainer(instance *current.IBPPeer, hsmConfig *config.HSMConfig) *container.Container { + hsmLibraryPath := hsmConfig.Library.FilePath + hsmLibraryName := filepath.Base(hsmLibraryPath) + + f := false + user := int64(0) + mountPath := "/shared" + return &container.Container{ + Container: &corev1.Container{ + Name: "hsm-client", + Image: fmt.Sprintf("%s:%s", instance.Spec.Images.HSMImage, instance.Spec.Images.HSMTag), + ImagePullPolicy: corev1.PullAlways, + Command: []string{ + "sh", + "-c", + fmt.Sprintf("mkdir -p %s/hsm && dst=\"%s/hsm/%s\" && echo \"Copying %s to ${dst}\" && mkdir -p $(dirname $dst) && cp -r %s $dst", mountPath, mountPath, hsmLibraryName, hsmLibraryPath, hsmLibraryPath), + }, + SecurityContext: &corev1.SecurityContext{ + RunAsUser: &user, + RunAsNonRoot: &f, + }, + VolumeMounts: []corev1.VolumeMount{ + corev1.VolumeMount{ + Name: "shared", + MountPath: mountPath, + }, + }, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("0.1"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("4Gi"), + }, + }, + }, + } +} + +func hsmSettings(instance *current.IBPPeer, hsmConfig *config.HSMConfig, peerCont container.Container, deployment *deployment.Deployment) { + for _, v := range hsmConfig.GetVolumes() { + deployment.AppendVolumeIfMissing(v) + } + + for _, vm := range hsmConfig.GetVolumeMounts() { + peerCont.AppendVolumeMountStructIfMissing(vm) + } + + for _, env := range hsmConfig.GetEnvs() { + peerCont.AppendEnvStructIfMissing(env) + } + + peerCont.AppendVolumeMountWithSubPathIfMissing("shared", "/hsm/lib", "hsm") + + if hsmConfig.Library.Auth != nil { + deployment.Spec.Template.Spec.ImagePullSecrets = util.AppendPullSecretIfMissing( + 
deployment.Spec.Template.Spec.ImagePullSecrets, + hsmConfig.Library.Auth.ImagePullSecret, + ) + } + + deployment.AddInitContainer(*hsmInitContainer(instance, hsmConfig)) + + // If daemon settings are configured in HSM config, create a sidecar that is running the daemon image + if hsmConfig.Daemon != nil { + hsmDaemonSettings(instance, hsmConfig, peerCont, deployment) + } +} + +func hsmDaemonSettings(instance *current.IBPPeer, hsmConfig *config.HSMConfig, peerCont container.Container, deployment *deployment.Deployment) { + // Unable to launch the daemon if not running in privileged mode + t := true + peerCont.SecurityContext.Privileged = &t + peerCont.SecurityContext.AllowPrivilegeEscalation = &t + + // Update command in deployment to ensure that the daemon is running before starting the peer + peerCont.Command = []string{ + "sh", + "-c", + fmt.Sprintf("%s && %s", config.DAEMON_CHECK_CMD, "peer node start"), + } + + // This is the shared volume where the file 'pkcsslotd-luanched' is touched to let + // other containers know that the daemon has successfully launched. + peerCont.AppendVolumeMountIfMissing("shared", "/shared") + + pvcVolumeName := "fabric-peer-0" + // Certain token information needs to be stored in a persistent store; the administrator + // responsible for configuring HSM sets the HSM config to point to the path where the PVC + // needs to be mounted. + var pvcMount *corev1.VolumeMount + for _, vm := range hsmConfig.MountPaths { + if vm.UsePVC { + pvcMount = &corev1.VolumeMount{ + Name: pvcVolumeName, + MountPath: vm.MountPath, + } + } + } + + // If a pull secret is required to pull the daemon image, update the deployment's image pull secrets + if hsmConfig.Daemon.Auth != nil { + deployment.Spec.Template.Spec.ImagePullSecrets = util.AppendPullSecretIfMissing( + deployment.Spec.Template.Spec.ImagePullSecrets, + hsmConfig.Daemon.Auth.ImagePullSecret, + ) + } + + // Add daemon container to the deployment + config.AddDaemonContainer(hsmConfig, deployment, instance.GetResource(current.HSMDAEMON), pvcMount) + + // If a PVC mount has been configured in the HSM config, set the volume mount on the peer container + // and add the PVC volume to the deployment if missing + if pvcMount != nil { + peerCont.AppendVolumeMountStructIfMissing(*pvcMount) + deployment.AppendPVCVolumeIfMissing(pvcVolumeName, instance.PVCName()) + } +} diff --git a/pkg/offering/base/peer/override/deployment_test.go b/pkg/offering/base/peer/override/deployment_test.go new file mode 100644 index 00000000..cc21e743 --- /dev/null +++ b/pkg/offering/base/peer/override/deployment_test.go @@ -0,0 +1,1111 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + "context" + "encoding/json" + "fmt" + "path/filepath" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/pkg/errors" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + v2peer "github.com/IBM-Blockchain/fabric-operator/pkg/apis/peer/v2" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + v2peerconfig "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer/config/v2" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + dep "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/deployment" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/peer/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + "github.com/IBM-Blockchain/fabric-operator/version" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" +) + +var testMatrix [][]resource.Quantity + +var _ = Describe("Base Peer Deployment Overrides", func() { + const ( + definitionsDir = "../../../../../definitions/peer" + ) + + var ( + overrider *override.Override + instance *current.IBPPeer + deployment *dep.Deployment + k8sDep *appsv1.Deployment + mockKubeClient *mocks.Client + ) + + BeforeEach(func() { + var err error + + k8sDep, err = util.GetDeploymentFromFile("../../../../../definitions/peer/deployment.yaml") + Expect(err).NotTo(HaveOccurred()) + deployment = dep.New(k8sDep) + + mockKubeClient = &mocks.Client{} + mockKubeClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj client.Object) error { + switch obj.(type) { + case *corev1.ConfigMap: + hsmConfig := &config.HSMConfig{ + Type: "hsm", + Version: "v1", + MountPaths: []config.MountPath{ + config.MountPath{ + Name: "hsmcrypto", + Secret: "hsmcrypto", + MountPath: "/hsm", + Paths: []config.Path{ + { + Key: "cafile.pem", + Path: "cafile.pem", + }, + { + Key: "cert.pem", + Path: "cert.pem", + }, + { + Key: "key.pem", + Path: "key.pem", + }, + { + Key: "server.pem", + Path: "server.pem", + }, + }, + }, + config.MountPath{ + Name: "hsmconfig", + Secret: "hsmcrypto", + MountPath: "/etc/Chrystoki.conf", + SubPath: "Chrystoki.conf", + Paths: []config.Path{ + { + Key: "Chrystoki.conf", + Path: "Chrystoki.conf", + }, + }, + }, + }, + Envs: []corev1.EnvVar{ + { + Name: "env1", + Value: "env1value", + }, + }, + } + + configBytes, err := yaml.Marshal(hsmConfig) + if err != nil { + return err + } + o := obj.(*corev1.ConfigMap) + o.Data = map[string]string{"ibp-hsm-config.yaml": string(configBytes)} + } + return nil + } + + overrider = &override.Override{ + Client: mockKubeClient, + DefaultCouchContainerFile: filepath.Join(definitionsDir, "couchdb.yaml"), + DefaultCouchInitContainerFile: filepath.Join(definitionsDir, "couchdb-init.yaml"), + DefaultCCLauncherFile: filepath.Join(definitionsDir, "chaincode-launcher.yaml"), + CouchdbUser: "dbuser", + CouchdbPassword: "dbpassword", + } + testMatrix = [][]resource.Quantity{ + {resource.MustParse("10m"), resource.MustParse("15m"), resource.MustParse("11m"), resource.MustParse("16m"), resource.MustParse("1G"), resource.MustParse("2G")}, + {resource.MustParse("20m"), resource.MustParse("25m"), resource.MustParse("21m"), resource.MustParse("26m"), resource.MustParse("1G"), resource.MustParse("4G")}, + {resource.MustParse("30m"), 
resource.MustParse("35m"), resource.MustParse("31m"), resource.MustParse("36m"), resource.MustParse("3G"), resource.MustParse("6G")}, + {resource.MustParse("40m"), resource.MustParse("45m"), resource.MustParse("41m"), resource.MustParse("46m"), resource.MustParse("4G"), resource.MustParse("8G")}, + {resource.MustParse("50m"), resource.MustParse("55m"), resource.MustParse("51m"), resource.MustParse("56m"), resource.MustParse("5G"), resource.MustParse("10G")}, + {resource.MustParse("60m"), resource.MustParse("65m"), resource.MustParse("61m"), resource.MustParse("66m"), resource.MustParse("6G"), resource.MustParse("12G")}, + } + + instance = ¤t.IBPPeer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "peeroverride", + Namespace: "namespace1", + }, + Spec: current.IBPPeerSpec{ + License: current.License{ + Accept: true, + }, + MSPID: "peer-msp-id", + Storage: ¤t.PeerStorages{}, + Service: ¤t.Service{}, + Images: ¤t.PeerImages{}, + Arch: []string{"test-arch"}, + DindArgs: []string{"--log-driver", "fluentd", "--mtu", "1480"}, + Ingress: current.Ingress{ + TlsSecretName: "tlssecret", + }, + Zone: "dal", + Region: "us-south", + StateDb: "couchdb", + ImagePullSecrets: []string{"pullsecret1"}, + Resources: ¤t.PeerResources{ + DinD: &corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: testMatrix[0][0], + corev1.ResourceMemory: testMatrix[0][1], + corev1.ResourceEphemeralStorage: testMatrix[0][4], + }, + Limits: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: testMatrix[0][2], + corev1.ResourceMemory: testMatrix[0][3], + corev1.ResourceEphemeralStorage: testMatrix[0][5], + }, + }, + Peer: &corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: testMatrix[1][0], + corev1.ResourceMemory: testMatrix[1][1], + corev1.ResourceEphemeralStorage: testMatrix[1][4], + }, + Limits: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: testMatrix[1][2], + corev1.ResourceMemory: testMatrix[1][3], + corev1.ResourceEphemeralStorage: testMatrix[1][5], + }, + }, + GRPCProxy: &corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: testMatrix[2][0], + corev1.ResourceMemory: testMatrix[2][1], + corev1.ResourceEphemeralStorage: testMatrix[2][4], + }, + Limits: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: testMatrix[2][2], + corev1.ResourceMemory: testMatrix[2][3], + corev1.ResourceEphemeralStorage: testMatrix[2][5], + }, + }, + FluentD: &corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: testMatrix[3][0], + corev1.ResourceMemory: testMatrix[3][1], + corev1.ResourceEphemeralStorage: testMatrix[3][4], + }, + Limits: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: testMatrix[3][2], + corev1.ResourceMemory: testMatrix[3][3], + corev1.ResourceEphemeralStorage: testMatrix[3][5], + }, + }, + CouchDB: &corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: testMatrix[4][0], + corev1.ResourceMemory: testMatrix[4][1], + corev1.ResourceEphemeralStorage: testMatrix[4][4], + }, + Limits: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: testMatrix[4][2], + corev1.ResourceMemory: testMatrix[4][3], + corev1.ResourceEphemeralStorage: testMatrix[4][5], + }, + }, + CCLauncher: &corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: testMatrix[5][0], + 
corev1.ResourceMemory: testMatrix[5][1], + corev1.ResourceEphemeralStorage: testMatrix[5][4], + }, + Limits: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: testMatrix[5][2], + corev1.ResourceMemory: testMatrix[5][3], + corev1.ResourceEphemeralStorage: testMatrix[5][5], + }, + }, + }, + }, + } + }) + + Context("create", func() { + It("returns an error if license is not accepted", func() { + instance.Spec.License.Accept = false + err := overrider.Deployment(instance, k8sDep, resources.Create) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("user must accept license before continuing")) + }) + + It("returns an error if MSP ID not provided", func() { + instance.Spec.MSPID = "" + err := overrider.Deployment(instance, k8sDep, resources.Create) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to provide MSP ID for peer")) + }) + + It("sets default dind args if none provided", func() { + instance.Spec.DindArgs = nil + err := overrider.Deployment(instance, k8sDep, resources.Create) + Expect(err).NotTo(HaveOccurred()) + Expect(deployment.Spec.Template.Spec.Containers[0].Args).To(Equal([]string{"--log-driver", "fluentd", "--log-opt", "fluentd-address=localhost:9880", "--mtu", "1400"})) + }) + + It("overrides value based on spec", func() { + mockKubeClient.GetReturnsOnCall(1, errors.New("no inter cert found")) + err := overrider.Deployment(instance, k8sDep, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + By("setting dind args", func() { + Expect(len(deployment.Spec.Template.Spec.Containers[0].Args)).To(Equal(4)) + }) + + By("setting service account", func() { + Expect(deployment.Spec.Template.Spec.ServiceAccountName).To(Equal(instance.Name)) + }) + + By("setting CORE_PEER_ID env var", func() { + ev := corev1.EnvVar{ + Name: "CORE_PEER_ID", + Value: instance.Name, + } + Expect(deployment.Spec.Template.Spec.Containers[1].Env).To(ContainElement(ev)) + }) + + By("setting CORE_PEER_LOCALMSPID env var", func() { + ev := corev1.EnvVar{ + Name: "CORE_PEER_LOCALMSPID", + Value: instance.Spec.MSPID, + } + Expect(deployment.Spec.Template.Spec.Containers[1].Env).To(ContainElement(ev)) + }) + + By("setting db-data volume", func() { + v := corev1.Volume{ + Name: "db-data", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: instance.Name + "-statedb-pvc", + }, + }, + } + Expect(deployment.Spec.Template.Spec.Volumes).To(ContainElement(v)) + }) + + By("setting CORE_LEDGER_STATE_STATEDATABASE env var", func() { + ev := corev1.EnvVar{ + Name: "CORE_LEDGER_STATE_STATEDATABASE", + Value: "CouchDB", + } + Expect(deployment.Spec.Template.Spec.Containers[1].Env).To(ContainElement(ev)) + }) + + By("setting CORE_LEDGER_STATE_COUCHDBCONFIG_USERNAME env var", func() { + ev := corev1.EnvVar{ + Name: "CORE_LEDGER_STATE_COUCHDBCONFIG_USERNAME", + Value: overrider.CouchdbUser, + } + Expect(deployment.Spec.Template.Spec.Containers[1].Env).To(ContainElement(ev)) + }) + + By("setting CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD env var", func() { + ev := corev1.EnvVar{ + Name: "CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD", + Value: overrider.CouchdbPassword, + } + Expect(deployment.Spec.Template.Spec.Containers[1].Env).To(ContainElement(ev)) + }) + + By("setting COUCHDB_USER env var", func() { + ev := corev1.EnvVar{ + Name: "COUCHDB_USER", + Value: overrider.CouchdbUser, + } + Expect(deployment.Spec.Template.Spec.Containers[4].Env).To(ContainElement(ev)) + }) + + By("setting COUCHDB_PASSWORD env var", func() { 
+ ev := corev1.EnvVar{ + Name: "COUCHDB_PASSWORD", + Value: overrider.CouchdbPassword, + } + Expect(deployment.Spec.Template.Spec.Containers[4].Env).To(ContainElement(ev)) + }) + + By("setting SKIP_PERMISSIONS_UPDATE env var", func() { + ev := corev1.EnvVar{ + Name: "SKIP_PERMISSIONS_UPDATE", + Value: "true", + } + Expect(deployment.Spec.Template.Spec.Containers[4].Env).To(ContainElement(ev)) + }) + + By("setting image pull secret", func() { + Expect(deployment.Spec.Template.Spec.ImagePullSecrets).To(ContainElement(corev1.LocalObjectReference{ + Name: instance.Spec.ImagePullSecrets[0], + })) + }) + + By("setting fabric-peer-0 volume", func() { + v := corev1.Volume{ + Name: "fabric-peer-0", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: instance.Name + "-pvc", + }, + }, + } + Expect(deployment.Spec.Template.Spec.Volumes).To(ContainElement(v)) + }) + + By("setting fluentd-config volume", func() { + v := corev1.Volume{ + Name: "fluentd-config", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: instance.Name + "-fluentd", + }, + }, + }, + } + Expect(deployment.Spec.Template.Spec.Volumes).To(ContainElement(v)) + }) + + By("setting affinity", func() { + expectedAffinity := overrider.GetAffinity(instance) + Expect(deployment.Spec.Template.Spec.Affinity).To(Equal(expectedAffinity)) + }) + + By("setting ecert admincerts volume and volume mount", func() { + v := corev1.Volume{ + Name: "ecert-admincerts", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: fmt.Sprintf("ecert-%s-admincerts", instance.Name), + }, + }, + } + Expect(deployment.Spec.Template.Spec.Volumes).To(ContainElement(v)) + + vm := corev1.VolumeMount{ + Name: "ecert-admincerts", + MountPath: "/certs/msp/admincerts", + } + Expect(deployment.Spec.Template.Spec.Containers[1].VolumeMounts).To(ContainElement(vm)) + }) + + By("setting ecert cacerts volume", func() { + v := corev1.Volume{ + Name: "ecert-cacerts", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: fmt.Sprintf("ecert-%s-cacerts", instance.Name), + }, + }, + } + Expect(deployment.Spec.Template.Spec.Volumes).To(ContainElement(v)) + }) + + By("setting ecert keystore volume", func() { + v := corev1.Volume{ + Name: "ecert-keystore", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: fmt.Sprintf("ecert-%s-keystore", instance.Name), + }, + }, + } + Expect(deployment.Spec.Template.Spec.Volumes).To(ContainElement(v)) + }) + + By("setting ecert signcert volume", func() { + v := corev1.Volume{ + Name: "ecert-signcert", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: fmt.Sprintf("ecert-%s-signcert", instance.Name), + }, + }, + } + Expect(deployment.Spec.Template.Spec.Volumes).To(ContainElement(v)) + }) + + By("setting tls cacerts volume", func() { + v := corev1.Volume{ + Name: "tls-cacerts", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: fmt.Sprintf("tls-%s-cacerts", instance.Name), + }, + }, + } + Expect(deployment.Spec.Template.Spec.Volumes).To(ContainElement(v)) + }) + + By("setting tls keystore volume", func() { + v := corev1.Volume{ + Name: "tls-keystore", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: fmt.Sprintf("tls-%s-keystore", instance.Name), + }, + }, + } + 
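+ // As with the ecert volumes above, each TLS volume is backed by a per-instance secret named "tls-<instance name>-<type>", so the assertions match on the full volume source rather than just the volume name.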
Expect(deployment.Spec.Template.Spec.Volumes).To(ContainElement(v)) + }) + + By("setting tls signcert volume", func() { + v := corev1.Volume{ + Name: "tls-signcert", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: fmt.Sprintf("tls-%s-signcert", instance.Name), + }, + }, + } + Expect(deployment.Spec.Template.Spec.Volumes).To(ContainElement(v)) + }) + + By("setting peer-config volume", func() { + v := corev1.Volume{ + Name: "peer-config", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: instance.Name + "-config", + }, + }, + }, + } + Expect(deployment.Spec.Template.Spec.Volumes).To(ContainElement(v)) + }) + + CommonPeerDeploymentOverrides(instance, k8sDep) + }) + + Context("images", func() { + var ( + image *current.PeerImages + ) + + BeforeEach(func() { + image = ¤t.PeerImages{ + PeerInitImage: "init-image", + DindImage: "dind-image", + CouchDBImage: "couchdb-image", + PeerImage: "peer-image", + GRPCWebImage: "proxy-image", + FluentdImage: "fluentd-image", + } + instance.Spec.Images = image + }) + + When("no tag is passed", func() { + It("uses 'latest' for image tags", func() { + err := overrider.Deployment(instance, k8sDep, resources.Create) + Expect(err).NotTo(HaveOccurred()) + Expect(deployment.Spec.Template.Spec.InitContainers[0].Image).To(Equal("init-image:latest")) + Expect(deployment.Spec.Template.Spec.Containers[0].Image).To(Equal("dind-image:latest")) + Expect(deployment.Spec.Template.Spec.Containers[1].Image).To(Equal("peer-image:latest")) + Expect(deployment.Spec.Template.Spec.Containers[2].Image).To(Equal("proxy-image:latest")) + Expect(deployment.Spec.Template.Spec.Containers[3].Image).To(Equal("fluentd-image:latest")) + Expect(deployment.Spec.Template.Spec.Containers[4].Image).To(Equal("couchdb-image:latest")) + }) + }) + + When("tag is passed", func() { + It("uses the passed in tag for image tags", func() { + instance.Spec.Images = image + image.DindTag = "1.0.1" + image.CouchDBTag = "1.0.2" + image.PeerTag = "1.0.3" + image.GRPCWebTag = "1.0.4" + image.PeerInitTag = "2.0.0" + image.FluentdTag = "1.0.5" + + err := overrider.Deployment(instance, k8sDep, resources.Create) + Expect(err).NotTo(HaveOccurred()) + Expect(deployment.Spec.Template.Spec.InitContainers[0].Image).To(Equal("init-image:2.0.0")) + Expect(deployment.Spec.Template.Spec.Containers[0].Image).To(Equal("dind-image:1.0.1")) + Expect(deployment.Spec.Template.Spec.Containers[1].Image).To(Equal("peer-image:1.0.3")) + Expect(deployment.Spec.Template.Spec.Containers[2].Image).To(Equal("proxy-image:1.0.4")) + Expect(deployment.Spec.Template.Spec.Containers[3].Image).To(Equal("fluentd-image:1.0.5")) + Expect(deployment.Spec.Template.Spec.Containers[4].Image).To(Equal("couchdb-image:1.0.2")) + }) + }) + + Context("chaincode container", func() { + BeforeEach(func() { + instance.Spec.Images = ¤t.PeerImages{ + PeerInitImage: "ibp-init", + PeerInitTag: "latest", + CCLauncherImage: "chaincode-builder", + CCLauncherTag: "cclauncher-amd64", + BuilderImage: "ibp-ccenv", + BuilderTag: "builder-tag", + GoEnvImage: "ibp-goenv", + GoEnvTag: "goenv-tag", + JavaEnvImage: "ibp-javaenv", + JavaEnvTag: "javaenv-tag", + NodeEnvImage: "ibp-nodeenv", + NodeEnvTag: "nodeenv-tag", + } + }) + + It("creates chaincode launcher container", func() { + err := overrider.CreateCCLauncherContainer(instance, deployment) + Expect(err).NotTo(HaveOccurred()) + + ccLauncher := deployment.MustGetContainer("chaincode-launcher") 
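+ // The launcher sidecar is looked up by its container name; the checks that follow confirm it picks up the expected resource requests/limits (row 5 of the test matrix) and the FILETRANSFERIMAGE/BUILDERIMAGE/GOENVIMAGE/JAVAENVIMAGE/NODEENVIMAGE env vars built as "<image>:<tag>" from the spec images.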
+ + By("setting resources from spec", func() { + Expect(ccLauncher.Resources.Requests[corev1.ResourceCPU]).To(Equal(testMatrix[5][0])) + Expect(ccLauncher.Resources.Requests[corev1.ResourceMemory]).To(Equal(testMatrix[5][1])) + Expect(ccLauncher.Resources.Requests[corev1.ResourceEphemeralStorage]).To(Equal(testMatrix[5][4])) + + Expect(ccLauncher.Resources.Limits[corev1.ResourceCPU]).To(Equal(testMatrix[5][2])) + Expect(ccLauncher.Resources.Limits[corev1.ResourceMemory]).To(Equal(testMatrix[5][3])) + Expect(ccLauncher.Resources.Limits[corev1.ResourceEphemeralStorage]).To(Equal(testMatrix[5][5])) + }) + + By("setting envs with the requested images/spec", func() { + Expect(ccLauncher.Env).To(ContainElement(corev1.EnvVar{ + Name: "FILETRANSFERIMAGE", + Value: fmt.Sprintf("%s:%s", instance.Spec.Images.PeerInitImage, instance.Spec.Images.PeerInitTag), + })) + + Expect(ccLauncher.Env).To(ContainElement(corev1.EnvVar{ + Name: "BUILDERIMAGE", + Value: fmt.Sprintf("%s:%s", instance.Spec.Images.BuilderImage, instance.Spec.Images.BuilderTag), + })) + + Expect(ccLauncher.Env).To(ContainElement(corev1.EnvVar{ + Name: "GOENVIMAGE", + Value: fmt.Sprintf("%s:%s", instance.Spec.Images.GoEnvImage, instance.Spec.Images.GoEnvTag), + })) + + Expect(ccLauncher.Env).To(ContainElement(corev1.EnvVar{ + Name: "JAVAENVIMAGE", + Value: fmt.Sprintf("%s:%s", instance.Spec.Images.JavaEnvImage, instance.Spec.Images.JavaEnvTag), + })) + + Expect(ccLauncher.Env).To(ContainElement(corev1.EnvVar{ + Name: "NODEENVIMAGE", + Value: fmt.Sprintf("%s:%s", instance.Spec.Images.NodeEnvImage, instance.Spec.Images.NodeEnvTag), + })) + }) + }) + }) + }) + + Context("leveldb", func() { + BeforeEach(func() { + instance.Spec.StateDb = "leveldb" + }) + + It("overrides value based on spec", func() { + err := overrider.Deployment(instance, k8sDep, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + By("setting volume mount env var", func() { + vm := corev1.VolumeMount{ + Name: "db-data", + MountPath: "/data/peer/ledgersData/stateLeveldb/", + SubPath: "data", + } + Expect(deployment.Spec.Template.Spec.InitContainers[0].VolumeMounts).To(ContainElement(vm)) + Expect(deployment.Spec.Template.Spec.Containers[1].VolumeMounts).To(ContainElement(vm)) + }) + + By("setting CORE_LEDGER_STATE_STATEDATABASE env var", func() { + ev := corev1.EnvVar{ + Name: "CORE_LEDGER_STATE_STATEDATABASE", + Value: "goleveldb", + } + Expect(deployment.Spec.Template.Spec.Containers[1].Env).To(ContainElement(ev)) + }) + }) + }) + }) + + Context("update", func() { + BeforeEach(func() { + var err error + + err = overrider.CreateCouchDBContainers(instance, deployment) + Expect(err).NotTo(HaveOccurred()) + }) + + It("overrides value based on spec", func() { + err := overrider.Deployment(instance, k8sDep, resources.Update) + Expect(err).NotTo(HaveOccurred()) + + CommonPeerDeploymentOverrides(instance, k8sDep) + }) + + It("sets init container command", func() { + err := overrider.Deployment(instance, k8sDep, resources.Update) + Expect(err).NotTo(HaveOccurred()) + + init, err := deployment.GetContainer(override.INIT) + Expect(err).NotTo(HaveOccurred()) + cmd := "DEFAULT_PERM=775 && DEFAULT_USER=7051 && DEFAULT_GROUP=1000 " + cmd += `&& PERM=$(stat -c "%a" /data/) && USER=$(stat -c "%u" /data/) && GROUP=$(stat -c "%g" /data/) ` + cmd += `&& if [ ${PERM} != ${DEFAULT_PERM} ] || [ ${USER} != ${DEFAULT_USER} ] || [ ${GROUP} != ${DEFAULT_GROUP} ]; ` + cmd += `then chmod -R ${DEFAULT_PERM} /data/ && chown -R -H ${DEFAULT_USER}:${DEFAULT_GROUP} /data/; fi` +
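+ // The expected init command only re-runs chmod/chown when the observed permissions, owner, or group of /data/ differ from the 775/7051/1000 defaults, presumably to avoid a costly recursive chown on every restart.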
Expect(init.Command).To(Equal([]string{"bash", "-c", cmd})) + }) + + Context("images", func() { + var ( + image *current.PeerImages + ) + + BeforeEach(func() { + image = ¤t.PeerImages{ + PeerInitImage: "init-image", + DindImage: "dind-image", + CouchDBImage: "couchdb-image", + PeerImage: "peer-image", + GRPCWebImage: "proxy-image", + FluentdImage: "fluentd-image", + } + instance.Spec.Images = image + }) + + When("no tag is passed", func() { + It("uses 'latest' for image tags", func() { + err := overrider.Deployment(instance, k8sDep, resources.Update) + Expect(err).NotTo(HaveOccurred()) + Expect(deployment.Spec.Template.Spec.InitContainers[0].Image).To(Equal("init-image:latest")) + Expect(deployment.Spec.Template.Spec.Containers[0].Image).To(Equal("dind-image:latest")) + Expect(deployment.Spec.Template.Spec.Containers[1].Image).To(Equal("peer-image:latest")) + Expect(deployment.Spec.Template.Spec.Containers[2].Image).To(Equal("proxy-image:latest")) + Expect(deployment.Spec.Template.Spec.Containers[3].Image).To(Equal("fluentd-image:latest")) + Expect(deployment.Spec.Template.Spec.Containers[4].Image).To(Equal("couchdb-image:latest")) + }) + }) + + When("tag is passed", func() { + It("uses the passed in tag for image tags", func() { + image.DindTag = "1.0.1" + image.CouchDBTag = "1.0.2" + image.PeerTag = "1.0.3" + image.GRPCWebTag = "1.0.4" + image.PeerInitTag = "2.0.0" + image.FluentdTag = "1.0.5" + + err := overrider.Deployment(instance, k8sDep, resources.Update) + Expect(err).NotTo(HaveOccurred()) + Expect(deployment.Spec.Template.Spec.InitContainers[0].Image).To(Equal("init-image:2.0.0")) + Expect(deployment.Spec.Template.Spec.Containers[0].Image).To(Equal("dind-image:1.0.1")) + Expect(deployment.Spec.Template.Spec.Containers[1].Image).To(Equal("peer-image:1.0.3")) + Expect(deployment.Spec.Template.Spec.Containers[2].Image).To(Equal("proxy-image:1.0.4")) + Expect(deployment.Spec.Template.Spec.Containers[3].Image).To(Equal("fluentd-image:1.0.5")) + Expect(deployment.Spec.Template.Spec.Containers[4].Image).To(Equal("couchdb-image:1.0.2")) + }) + }) + }) + + Context("v2", func() { + BeforeEach(func() { + instance.Spec.FabricVersion = "2.4.1" + }) + + Context("chaincode launcher", func() { + BeforeEach(func() { + instance.Spec.Images = ¤t.PeerImages{ + CCLauncherImage: "new-cclauncher", + CCLauncherTag: "v2", + PeerInitImage: "new-peerinit", + PeerInitTag: "v2", + BuilderImage: "new-builder", + BuilderTag: "v2", + GoEnvImage: "new-goenv", + GoEnvTag: "v2", + JavaEnvImage: "new-javaenv", + JavaEnvTag: "v2", + NodeEnvImage: "new-nodeenv", + NodeEnvTag: "v2", + } + }) + + It("updates", func() { + err := overrider.Deployment(instance, k8sDep, resources.Update) + Expect(err).NotTo(HaveOccurred()) + + By("setting chaincode launcher from spec to deployment", func() { + Expect(deployment.MustGetContainer(override.CCLAUNCHER).Image).To(Equal("new-cclauncher:v2")) + }) + + By("having a non-null cc launcher tag", func() { + _, err = deployment.GetContainer(override.CCLAUNCHER) + Expect(err).ToNot(HaveOccurred()) + }) + + By("setting env vars with new image values", func() { + Expect(deployment.MustGetContainer(override.CCLAUNCHER).Env).To(ContainElements( + corev1.EnvVar{ + Name: "FILETRANSFERIMAGE", + Value: "new-peerinit:v2", + }, + corev1.EnvVar{ + Name: "BUILDERIMAGE", + Value: "new-builder:v2", + }, + corev1.EnvVar{ + Name: "GOENVIMAGE", + Value: "new-goenv:v2", + }, + corev1.EnvVar{ + Name: "JAVAENVIMAGE", + Value: "new-javaenv:v2", + }, + corev1.EnvVar{ + Name: "NODEENVIMAGE", + Value: 
"new-nodeenv:v2", + }, + )) + }) + + By("changing permissions on the /cclauncher volume", func() { + init, err := deployment.GetContainer(override.INIT) + Expect(err).NotTo(HaveOccurred()) + Expect(init.Command).To(HaveLen(3)) + Expect(init.Command[0]).To(Equal("bash")) + Expect(init.Command[1]).To(Equal("-c")) + Expect(init.Command[2]).To(Equal("DEFAULT_PERM=775 && DEFAULT_USER=7051 && DEFAULT_GROUP=1000 && PERM=$(stat -c \"%a\" /data/) && USER=$(stat -c \"%u\" /data/) && GROUP=$(stat -c \"%g\" /data/) && if [ ${PERM} != ${DEFAULT_PERM} ] || [ ${USER} != ${DEFAULT_USER} ] || [ ${GROUP} != ${DEFAULT_GROUP} ]; then chmod -R ${DEFAULT_PERM} /{data/,cclauncher/} && chown -R -H ${DEFAULT_USER}:${DEFAULT_GROUP} /{data/,cclauncher/}; fi")) + }) + }) + }) + + Context("chaincode launcher with leveldb", func() { + BeforeEach(func() { + instance.Spec.Images = ¤t.PeerImages{ + CCLauncherImage: "new-cclauncher", + CCLauncherTag: "v2", + } + instance.Spec.StateDb = "leveldb" + }) + + It("updates", func() { + err := overrider.Deployment(instance, k8sDep, resources.Update) + Expect(err).NotTo(HaveOccurred()) + + By("having a non-null cc launcher tag", func() { + _, err = deployment.GetContainer(override.CCLAUNCHER) + Expect(err).ToNot(HaveOccurred()) + }) + + By("setting chaincode launcher from spec to deployment", func() { + Expect(deployment.MustGetContainer(override.CCLAUNCHER).Image).To(Equal("new-cclauncher:v2")) + }) + + By("changing permissions on the cclauncher and stateLeveldb volumes", func() { + init, err := deployment.GetContainer(override.INIT) + Expect(err).NotTo(HaveOccurred()) + Expect(init.Command).To(HaveLen(3)) + Expect(init.Command[0]).To(Equal("bash")) + Expect(init.Command[1]).To(Equal("-c")) + Expect(init.Command[2]).To(Equal("DEFAULT_PERM=775 && DEFAULT_USER=7051 && DEFAULT_GROUP=1000 && PERM=$(stat -c \"%a\" /data/) && USER=$(stat -c \"%u\" /data/) && GROUP=$(stat -c \"%g\" /data/) && if [ ${PERM} != ${DEFAULT_PERM} ] || [ ${USER} != ${DEFAULT_USER} ] || [ ${GROUP} != ${DEFAULT_GROUP} ]; then chmod -R ${DEFAULT_PERM} /{data/,data/peer/ledgersData/stateLeveldb,cclauncher/} && chown -R -H ${DEFAULT_USER}:${DEFAULT_GROUP} /{data/,data/peer/ledgersData/stateLeveldb,cclauncher/}; fi")) + }) + }) + }) + + Context("external chaincode builder", func() { + BeforeEach(func() { + instance.Spec.Images = ¤t.PeerImages{ + PeerInitImage: "new-peer-init", + PeerInitTag: "latest", + PeerImage: "hyperledger/fabric-peer", + PeerTag: "2.4.1", + } + + err := overrider.Deployment(instance, k8sDep, resources.Update) + Expect(err).NotTo(HaveOccurred()) + }) + + When("a nil launcher is specified", func() { + It("emits a deployment without a launcher sidecar", func() { + _, err := deployment.GetContainer(override.CCLAUNCHER) + Expect(err).To(HaveOccurred()) + }) + + It("does not change permissions on the /cclauncher volume", func() { + init, err := deployment.GetContainer(override.INIT) + Expect(err).NotTo(HaveOccurred()) + Expect(init.Command).To(HaveLen(3)) + Expect(init.Command[0]).To(Equal("bash")) + Expect(init.Command[1]).To(Equal("-c")) + Expect(init.Command[2]).To(Equal("DEFAULT_PERM=775 && DEFAULT_USER=7051 && DEFAULT_GROUP=1000 && PERM=$(stat -c \"%a\" /data/) && USER=$(stat -c \"%u\" /data/) && GROUP=$(stat -c \"%g\" /data/) && if [ ${PERM} != ${DEFAULT_PERM} ] || [ ${USER} != ${DEFAULT_USER} ] || [ ${GROUP} != ${DEFAULT_GROUP} ]; then chmod -R ${DEFAULT_PERM} /data/ && chown -R -H ${DEFAULT_USER}:${DEFAULT_GROUP} /data/; fi")) + }) + }) + }) + + Context("chaincode builder config map as env", 
func() { + BeforeEach(func() { + instance.Spec.ChaincodeBuilderConfig = map[string]string{ + "peername": "org1peer1", + } + + err := overrider.Deployment(instance, k8sDep, resources.Update) + Expect(err).NotTo(HaveOccurred()) + }) + + When("A chaincode builder config is present", func() { + It("Sets a JSON env map in the peer deployment", func() { + peer, err := deployment.GetContainer(override.PEER) + Expect(err).NotTo(HaveOccurred()) + Expect(peer.Env).NotTo(BeNil()) + + Expect(deployment.MustGetContainer(override.PEER).Env).To(ContainElement(corev1.EnvVar{ + Name: "CHAINCODE_AS_A_SERVICE_BUILDER_CONFIG", + Value: "{\"peername\":\"org1peer1\"}", + })) + }) + }) + }) + + Context("couchbase and external builder images: regression test for Issue #3269", func() { + BeforeEach(func() { + instance.Spec.Images = ¤t.PeerImages{ + PeerInitImage: "new-peer-init", + PeerInitTag: "latest", + PeerImage: "hyperledger/fabric-peer", + PeerTag: "2.4.1", + } + + instance.Spec.StateDb = "couchdb" + + err := overrider.Deployment(instance, k8sDep, resources.Update) + Expect(err).NotTo(HaveOccurred()) + }) + + When("a nil launcher is specified with a couchdb state table", func() { + It("does not specify a bash set for the init container permission command", func() { + init, err := deployment.GetContainer(override.INIT) + Expect(err).NotTo(HaveOccurred()) + Expect(init.Command).To(HaveLen(3)) + Expect(init.Command[0]).To(Equal("bash")) + Expect(init.Command[1]).To(Equal("-c")) + Expect(init.Command[2]).To(Equal("DEFAULT_PERM=775 && DEFAULT_USER=7051 && DEFAULT_GROUP=1000 && PERM=$(stat -c \"%a\" /data/) && USER=$(stat -c \"%u\" /data/) && GROUP=$(stat -c \"%g\" /data/) && if [ ${PERM} != ${DEFAULT_PERM} ] || [ ${USER} != ${DEFAULT_USER} ] || [ ${GROUP} != ${DEFAULT_GROUP} ]; then chmod -R ${DEFAULT_PERM} /data/ && chown -R -H ${DEFAULT_USER}:${DEFAULT_GROUP} /data/; fi")) + }) + }) + }) + + Context("leveldb and external builder images: regression test for Issue #3269", func() { + BeforeEach(func() { + instance.Spec.Images = ¤t.PeerImages{ + PeerInitImage: "new-peer-init", + PeerInitTag: "latest", + PeerImage: "hyperledger/fabric-peer", + PeerTag: "2.4.1", + } + + instance.Spec.StateDb = "leveldb" + + err := overrider.Deployment(instance, k8sDep, resources.Update) + Expect(err).NotTo(HaveOccurred()) + }) + + When("a nil launcher is specified with a leveldb state table", func() { + It("specifies a bash set for the init container permission command", func() { + init, err := deployment.GetContainer(override.INIT) + Expect(err).NotTo(HaveOccurred()) + Expect(init.Command).To(HaveLen(3)) + Expect(init.Command[0]).To(Equal("bash")) + Expect(init.Command[1]).To(Equal("-c")) + Expect(init.Command[2]).To(Equal("DEFAULT_PERM=775 && DEFAULT_USER=7051 && DEFAULT_GROUP=1000 && PERM=$(stat -c \"%a\" /data/) && USER=$(stat -c \"%u\" /data/) && GROUP=$(stat -c \"%g\" /data/) && if [ ${PERM} != ${DEFAULT_PERM} ] || [ ${USER} != ${DEFAULT_USER} ] || [ ${GROUP} != ${DEFAULT_GROUP} ]; then chmod -R ${DEFAULT_PERM} /{data/,data/peer/ledgersData/stateLeveldb} && chown -R -H ${DEFAULT_USER}:${DEFAULT_GROUP} /{data/,data/peer/ledgersData/stateLeveldb}; fi")) + }) + }) + }) + }) + + Context("v24", func() { + BeforeEach(func() { + instance.Spec.FabricVersion = "2.4.3" + instance.Spec.Images = ¤t.PeerImages{ + CCLauncherImage: "new-cclauncher", + CCLauncherTag: "v2", + PeerInitImage: "new-peerinit", + PeerInitTag: "v2", + BuilderImage: "new-builder", + BuilderTag: "v2", + GoEnvImage: "new-goenv", + GoEnvTag: "v2", + JavaEnvImage: 
"new-javaenv", + JavaEnvTag: "v2", + NodeEnvImage: "new-nodeenv", + NodeEnvTag: "v2", + } + }) + + Context("chaincode launcher", func() { + It("updates", func() { + err := overrider.Deployment(instance, k8sDep, resources.Update) + Expect(err).NotTo(HaveOccurred()) + + By("setting liveliness probe to https", func() { + Expect(deployment.MustGetContainer(override.CCLAUNCHER).LivenessProbe.HTTPGet.Scheme).To(Equal(corev1.URISchemeHTTPS)) + }) + + By("setting readiness probe to https", func() { + Expect(deployment.MustGetContainer(override.CCLAUNCHER).ReadinessProbe.HTTPGet.Scheme).To(Equal(corev1.URISchemeHTTPS)) + }) + }) + }) + }) + }) + + Context("Replicas", func() { + When("Replicas is greater than 1", func() { + It("returns an error", func() { + replicas := int32(2) + instance.Spec.Replicas = &replicas + err := overrider.Deployment(instance, k8sDep, resources.Create) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("replicas > 1 not allowed in IBPPeer")) + }) + }) + When("Replicas is equal to 1", func() { + It("returns success", func() { + replicas := int32(1) + instance.Spec.Replicas = &replicas + err := overrider.Deployment(instance, k8sDep, resources.Create) + Expect(err).NotTo(HaveOccurred()) + }) + }) + When("Replicas is equal to 0", func() { + It("returns success", func() { + replicas := int32(0) + instance.Spec.Replicas = &replicas + err := overrider.Deployment(instance, k8sDep, resources.Create) + Expect(err).NotTo(HaveOccurred()) + }) + }) + When("Replicas is nil", func() { + It("returns success", func() { + instance.Spec.Replicas = nil + err := overrider.Deployment(instance, k8sDep, resources.Create) + Expect(err).NotTo(HaveOccurred()) + }) + }) + }) + + Context("HSM", func() { + BeforeEach(func() { + configOverride := v2peerconfig.Core{ + Core: v2peer.Core{ + Peer: v2peer.Peer{ + BCCSP: &common.BCCSP{ + ProviderName: "PKCS11", + PKCS11: &common.PKCS11Opts{ + Label: "partition1", + Pin: "B6T9Q7mGNG", + }, + }, + }, + }, + } + + configBytes, err := json.Marshal(configOverride) + Expect(err).NotTo(HaveOccurred()) + + instance.Spec.ConfigOverride = &runtime.RawExtension{Raw: configBytes} + }) + + It("sets proxy env on peer container", func() { + instance.Spec.HSM = ¤t.HSM{PKCS11Endpoint: "1.2.3.4"} + err := overrider.Deployment(instance, k8sDep, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + d := dep.New(k8sDep) + Expect(d.MustGetContainer(override.PEER).Env).To(ContainElement(corev1.EnvVar{ + Name: "PKCS11_PROXY_SOCKET", + Value: "1.2.3.4", + })) + }) + + It("configures deployment to use HSM init image", func() { + err := overrider.Deployment(instance, k8sDep, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + d := dep.New(k8sDep) + By("setting volume mounts", func() { + Expect(d.MustGetContainer(override.PEER).VolumeMounts).To(ContainElement(corev1.VolumeMount{ + Name: "shared", + MountPath: "/hsm/lib", + SubPath: "hsm", + })) + + Expect(d.MustGetContainer(override.PEER).VolumeMounts).To(ContainElement(corev1.VolumeMount{ + Name: "hsmconfig", + MountPath: "/etc/Chrystoki.conf", + SubPath: "Chrystoki.conf", + })) + }) + + By("setting env vars", func() { + Expect(d.MustGetContainer(override.PEER).Env).To(ContainElement(corev1.EnvVar{ + Name: "env1", + Value: "env1value", + })) + }) + + By("creating HSM init container", func() { + Expect(d.ContainerExists("hsm-client")).To(Equal(true)) + }) + }) + }) +}) + +func CommonPeerDeploymentOverrides(instance *current.IBPPeer, deployment *appsv1.Deployment) { + // Perform check after override to make sure 
new values are in place + for i, c := range deployment.Spec.Template.Spec.Containers { + By(fmt.Sprintf("setting resources for container '%s'", c.Name), func() { + Expect(c.Resources.Requests[corev1.ResourceCPU]).To(Equal(testMatrix[i][0])) + Expect(c.Resources.Requests[corev1.ResourceMemory]).To(Equal(testMatrix[i][1])) + Expect(c.Resources.Requests[corev1.ResourceEphemeralStorage]).To(Equal(testMatrix[i][4])) + Expect(c.Resources.Limits[corev1.ResourceCPU]).To(Equal(testMatrix[i][2])) + Expect(c.Resources.Limits[corev1.ResourceMemory]).To(Equal(testMatrix[i][3])) + Expect(c.Resources.Limits[corev1.ResourceEphemeralStorage]).To(Equal(testMatrix[i][5])) + }) + if version.GetMajorReleaseVersion(instance.Spec.FabricVersion) == version.V2 { + if c.Name == "peer" { + By("string PEER_NAME in peer container", func() { + Expect(util.EnvExists(c.Env, "PEER_NAME")).To(Equal(true)) + Expect(util.GetEnvValue(c.Env, "PEER_NAME")).To(Equal(instance.GetName())) + }) + } + if c.Name == "chaincode-launcher" { + By("string CORE_PEER_LOCALMSPID in v2 chaincode-launcher container", func() { + Expect(util.EnvExists(c.Env, "CORE_PEER_LOCALMSPID")).To(Equal(true)) + Expect(util.GetEnvValue(c.Env, "CORE_PEER_LOCALMSPID")).To(Equal(instance.Spec.MSPID)) + }) + } + } + } +} diff --git a/pkg/offering/base/peer/override/override.go b/pkg/offering/base/peer/override/override.go new file mode 100644 index 00000000..dfa04c67 --- /dev/null +++ b/pkg/offering/base/peer/override/override.go @@ -0,0 +1,33 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override + +import ( + "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" +) + +type Override struct { + Client controllerclient.Client + + DefaultCouchContainerFile string + DefaultCouchInitContainerFile string + CouchdbUser string + CouchdbPassword string + DefaultCCLauncherFile string +} diff --git a/pkg/offering/base/peer/override/override_suite_test.go b/pkg/offering/base/peer/override/override_suite_test.go new file mode 100644 index 00000000..fa47c9b8 --- /dev/null +++ b/pkg/offering/base/peer/override/override_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +func TestOverride(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Override Suite") +} diff --git a/pkg/offering/base/peer/override/override_test.go b/pkg/offering/base/peer/override/override_test.go new file mode 100644 index 00000000..92ac6a9a --- /dev/null +++ b/pkg/offering/base/peer/override/override_test.go @@ -0,0 +1,74 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/peer/override" +) + +var _ = Describe("Base Peer Overrides", func() { + var ( + overrider *override.Override + instance *current.IBPPeer + ) + + BeforeEach(func() { + overrider = &override.Override{ + Client: &mocks.Client{}, + } + instance = ¤t.IBPPeer{} + }) + + Context("Affnity", func() { + BeforeEach(func() { + instance = ¤t.IBPPeer{ + Spec: current.IBPPeerSpec{ + MSPID: "peer-msp-id", + Arch: []string{"test-arch"}, + Zone: "dal", + Region: "us-south", + }, + } + }) + + It("returns an proper affinity when arch is passed", func() { + a := overrider.GetAffinity(instance) + Expect(a.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0].Values).To(Equal([]string{"test-arch"})) + Expect(a.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0].PodAffinityTerm.LabelSelector.MatchExpressions[0].Key).To(Equal("orgname")) + Expect(a.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0].PodAffinityTerm.LabelSelector.MatchExpressions[0].Values).To(Equal([]string{"peer-msp-id"})) + Expect(a.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0].Weight).To(Equal(int32(100))) + }) + + It("returns an proper affinity when no arch is passed", func() { + instance.Spec.Arch = []string{} + a := overrider.GetAffinity(instance) + Expect(a.NodeAffinity).NotTo(BeNil()) + Expect(a.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0].Values).To(Equal([]string{"dal"})) + Expect(a.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[1].Values).To(Equal([]string{"us-south"})) + Expect(a.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0].PodAffinityTerm.LabelSelector.MatchExpressions[0].Key).To(Equal("orgname")) + Expect(a.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0].PodAffinityTerm.LabelSelector.MatchExpressions[0].Values).To(Equal([]string{"peer-msp-id"})) + Expect(a.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0].Weight).To(Equal(int32(100))) + }) + }) +}) diff --git a/pkg/offering/base/peer/override/pvc.go b/pkg/offering/base/peer/override/pvc.go new file 
mode 100644 index 00000000..fe55132b --- /dev/null +++ b/pkg/offering/base/peer/override/pvc.go @@ -0,0 +1,80 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override + +import ( + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (o *Override) PVC(object v1.Object, pvc *corev1.PersistentVolumeClaim, action resources.Action) error { + instance := object.(*current.IBPPeer) + switch action { + case resources.Create: + return o.CreatePVC(instance, pvc) + case resources.Update: + return o.UpdatePVC(instance, pvc) + } + + return nil +} + +func (o *Override) CreatePVC(instance *current.IBPPeer, pvc *corev1.PersistentVolumeClaim) error { + storage := instance.Spec.Storage + if storage != nil { + peerStorage := storage.Peer + if peerStorage != nil { + if peerStorage.Class != "" { + pvc.Spec.StorageClassName = &peerStorage.Class + } + if peerStorage.Size != "" { + quantity, err := resource.ParseQuantity(peerStorage.Size) + if err != nil { + return err + } + resourceMap := pvc.Spec.Resources.Requests + if resourceMap == nil { + resourceMap = corev1.ResourceList{} + } + resourceMap[corev1.ResourceStorage] = quantity + pvc.Spec.Resources.Requests = resourceMap + } + } + } + + if pvc.ObjectMeta.Labels == nil { + pvc.ObjectMeta.Labels = map[string]string{} + } + if instance.Spec.Zone != "" { + pvc.ObjectMeta.Labels["zone"] = instance.Spec.Zone + } + + if instance.Spec.Region != "" { + pvc.ObjectMeta.Labels["region"] = instance.Spec.Region + } + + return nil +} + +func (o *Override) UpdatePVC(instance *current.IBPPeer, pvc *corev1.PersistentVolumeClaim) error { + return nil +} diff --git a/pkg/offering/base/peer/override/pvc_test.go b/pkg/offering/base/peer/override/pvc_test.go new file mode 100644 index 00000000..b10fb064 --- /dev/null +++ b/pkg/offering/base/peer/override/pvc_test.go @@ -0,0 +1,99 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/peer/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" +) + +var _ = Describe("Base Peer PVC Overrides", func() { + var ( + overrider *override.Override + instance *current.IBPPeer + pvc *corev1.PersistentVolumeClaim + ) + + BeforeEach(func() { + var err error + + pvc, err = util.GetPVCFromFile("../../../../../definitions/peer/pvc.yaml") + Expect(err).NotTo(HaveOccurred()) + + overrider = &override.Override{} + instance = ¤t.IBPPeer{ + Spec: current.IBPPeerSpec{ + Zone: "zone1", + Region: "region1", + Storage: ¤t.PeerStorages{ + Peer: ¤t.StorageSpec{ + Size: "100m", + Class: "manual", + }, + }, + }, + } + }) + + Context("create", func() { + It("overrides values based on spec", func() { + err := overrider.PVC(instance, pvc, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + By("setting storage class", func() { + Expect(pvc.Spec.StorageClassName).To(Equal(&instance.Spec.Storage.Peer.Class)) + }) + + By("setting requested storage size", func() { + expectedRequests, err := resource.ParseQuantity(instance.Spec.Storage.Peer.Size) + Expect(err).NotTo(HaveOccurred()) + Expect(pvc.Spec.Resources.Requests).To(Equal(corev1.ResourceList{corev1.ResourceStorage: expectedRequests})) + }) + + By("setting zone labels", func() { + Expect(pvc.ObjectMeta.Labels["zone"]).To(Equal(instance.Spec.Zone)) + }) + + By("setting region labels", func() { + Expect(pvc.ObjectMeta.Labels["region"]).To(Equal(instance.Spec.Region)) + }) + }) + + It("sets class to manual if spec used local", func() { + instance.Spec.Storage.Peer.Class = "manual" + err := overrider.PVC(instance, pvc, resources.Create) + Expect(err).NotTo(HaveOccurred()) + Expect(*pvc.Spec.StorageClassName).To(Equal("manual")) + }) + + It("returns an error if invalid value for size is used", func() { + instance.Spec.Storage.Peer.Size = "10x" + err := overrider.PVC(instance, pvc, resources.Create) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("quantities must match the regular expression")) + }) + }) +}) diff --git a/pkg/offering/base/peer/override/service.go b/pkg/offering/base/peer/override/service.go new file mode 100644 index 00000000..84b3020a --- /dev/null +++ b/pkg/offering/base/peer/override/service.go @@ -0,0 +1,53 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package override + +import ( + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (o *Override) Service(object v1.Object, service *corev1.Service, action resources.Action) error { + instance := object.(*current.IBPPeer) + switch action { + case resources.Create: + return o.CreateService(instance, service) + case resources.Update: + return o.UpdateService(instance, service) + } + + return nil +} + +func (o *Override) CreateService(instance *current.IBPPeer, service *corev1.Service) error { + if instance.Spec.Service != nil { + serviceType := instance.Spec.Service.Type + if serviceType != "" { + service.Spec.Type = serviceType + } + } + + return nil +} + +func (o *Override) UpdateService(instance *current.IBPPeer, service *corev1.Service) error { + return nil +} diff --git a/pkg/offering/base/peer/override/service_test.go b/pkg/offering/base/peer/override/service_test.go new file mode 100644 index 00000000..2cd3fc7d --- /dev/null +++ b/pkg/offering/base/peer/override/service_test.go @@ -0,0 +1,65 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/peer/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" +) + +var _ = Describe("Base Peer Service Overrides", func() { + var ( + overrider *override.Override + instance *current.IBPPeer + service *corev1.Service + ) + + BeforeEach(func() { + var err error + + service, err = util.GetServiceFromFile("../../../../../definitions/peer/service.yaml") + Expect(err).NotTo(HaveOccurred()) + + overrider = &override.Override{} + instance = ¤t.IBPPeer{ + Spec: current.IBPPeerSpec{ + Service: ¤t.Service{ + Type: corev1.ServiceTypeNodePort, + }, + }, + } + }) + + Context("create", func() { + It("overrides values based on spec", func() { + err := overrider.Service(instance, service, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + By("setting service type", func() { + Expect(service.Spec.Type).To(Equal(instance.Spec.Service.Type)) + }) + }) + }) +}) diff --git a/pkg/offering/base/peer/override/serviceaccount.go b/pkg/offering/base/peer/override/serviceaccount.go new file mode 100644 index 00000000..6f115e92 --- /dev/null +++ b/pkg/offering/base/peer/override/serviceaccount.go @@ -0,0 +1,58 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package override + +import ( + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (o *Override) ServiceAccount(object v1.Object, sa *corev1.ServiceAccount, action resources.Action) error { + instance := object.(*current.IBPPeer) + switch action { + case resources.Create: + return o.CreateServiceAccount(instance, sa) + case resources.Update: + return o.UpdateServiceAccount(instance, sa) + } + + return nil +} + +func (o *Override) CreateServiceAccount(instance *current.IBPPeer, sa *corev1.ServiceAccount) error { + return o.commonServiceAccount(instance, sa) +} + +func (o *Override) UpdateServiceAccount(instance *current.IBPPeer, sa *corev1.ServiceAccount) error { + return o.commonServiceAccount(instance, sa) +} + +func (o *Override) commonServiceAccount(instance *current.IBPPeer, sa *corev1.ServiceAccount) error { + for _, pullSecret := range instance.Spec.ImagePullSecrets { + imagePullSecret := corev1.LocalObjectReference{ + Name: pullSecret, + } + + sa.ImagePullSecrets = append(sa.ImagePullSecrets, imagePullSecret) + } + + return nil +} diff --git a/pkg/offering/base/peer/override/serviceaccount_test.go b/pkg/offering/base/peer/override/serviceaccount_test.go new file mode 100644 index 00000000..a5eacfbe --- /dev/null +++ b/pkg/offering/base/peer/override/serviceaccount_test.go @@ -0,0 +1,80 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/peer/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" +) + +var _ = Describe("Base Peer Service Account Overrides", func() { + var ( + overrider *override.Override + instance *current.IBPPeer + sa *corev1.ServiceAccount + ) + + BeforeEach(func() { + var err error + + sa, err = util.GetServiceAccountFromFile("../../../../../definitions/peer/serviceaccount.yaml") + Expect(err).NotTo(HaveOccurred()) + + overrider = &override.Override{} + instance = ¤t.IBPPeer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "override1", + Namespace: "namespace1", + }, + Spec: current.IBPPeerSpec{ + ImagePullSecrets: []string{"pullsecret1"}, + }, + } + }) + + Context("create", func() { + It("overrides values in service account, based on Peer's instance spec", func() { + err := overrider.ServiceAccount(instance, sa, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + By("setting the image pull secret", func() { + Expect(sa.ImagePullSecrets[1].Name).To(Equal(instance.Spec.ImagePullSecrets[0])) + }) + }) + + Context("update", func() { + It("overrides values in service account, based on Peer's instance spec", func() { + err := overrider.ServiceAccount(instance, sa, resources.Update) + Expect(err).NotTo(HaveOccurred()) + + By("setting the image pull secret", func() { + Expect(sa.ImagePullSecrets[1].Name).To(Equal(instance.Spec.ImagePullSecrets[0])) + }) + }) + }) + }) +}) diff --git a/pkg/offering/base/peer/override/statedbpvc.go b/pkg/offering/base/peer/override/statedbpvc.go new file mode 100644 index 00000000..b418a53f --- /dev/null +++ b/pkg/offering/base/peer/override/statedbpvc.go @@ -0,0 +1,80 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package override + +import ( + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (o *Override) StateDBPVC(object v1.Object, pvc *corev1.PersistentVolumeClaim, action resources.Action) error { + instance := object.(*current.IBPPeer) + switch action { + case resources.Create: + return o.CreateStateDBPVC(instance, pvc) + case resources.Update: + return o.UpdateStateDBPVC(instance, pvc) + } + + return nil +} + +func (o *Override) CreateStateDBPVC(instance *current.IBPPeer, pvc *corev1.PersistentVolumeClaim) error { + storage := instance.Spec.Storage + if storage != nil { + stateDBStorage := storage.StateDB + if stateDBStorage != nil { + if stateDBStorage.Class != "" { + pvc.Spec.StorageClassName = &stateDBStorage.Class + } + if stateDBStorage.Size != "" { + quantity, err := resource.ParseQuantity(stateDBStorage.Size) + if err != nil { + return err + } + resourceMap := pvc.Spec.Resources.Requests + if resourceMap == nil { + resourceMap = corev1.ResourceList{} + } + resourceMap[corev1.ResourceStorage] = quantity + pvc.Spec.Resources.Requests = resourceMap + } + } + } + + if pvc.ObjectMeta.Labels == nil { + pvc.ObjectMeta.Labels = map[string]string{} + } + if instance.Spec.Zone != "" { + pvc.ObjectMeta.Labels["zone"] = instance.Spec.Zone + } + + if instance.Spec.Region != "" { + pvc.ObjectMeta.Labels["region"] = instance.Spec.Region + } + + return nil +} + +func (o *Override) UpdateStateDBPVC(instance *current.IBPPeer, cm *corev1.PersistentVolumeClaim) error { + return nil +} diff --git a/pkg/offering/base/peer/peer.go b/pkg/offering/base/peer/peer.go new file mode 100644 index 00000000..5eb4896c --- /dev/null +++ b/pkg/offering/base/peer/peer.go @@ -0,0 +1,1586 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package basepeer + +import ( + "context" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + "time" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + "github.com/IBM-Blockchain/fabric-operator/pkg/action" + commonapi "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/certificate" + commoninit "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common" + commonconfig "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + initializer "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer" + peerconfig "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer/config/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/validator" + controllerclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + jobv1 "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/job" + resourcemanager "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/manager" + "github.com/IBM-Blockchain/fabric-operator/pkg/migrator/peer/fabric" + v2 "github.com/IBM-Blockchain/fabric-operator/pkg/migrator/peer/fabric/v2" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common/reconcilechecks" + "github.com/IBM-Blockchain/fabric-operator/pkg/operatorerrors" + "github.com/IBM-Blockchain/fabric-operator/pkg/restart" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + "github.com/IBM-Blockchain/fabric-operator/version" + "github.com/pkg/errors" + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + k8sclient "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/yaml" +) + +var log = logf.Log.WithName("base_peer") + +const ( + DefaultCouchContainer = "./definitions/peer/couchdb.yaml" + DefaultCouchInitContainer = "./definitions/peer/couchdb-init.yaml" + + defaultDeployment = "./definitions/peer/deployment.yaml" + defaultPVC = "./definitions/peer/pvc.yaml" + defaultCouchDBPVC = "./definitions/peer/couchdb-pvc.yaml" + defaultService = "./definitions/peer/service.yaml" + defaultRole = "./definitions/peer/role.yaml" + defaultServiceAccount = "./definitions/peer/serviceaccount.yaml" + defaultRoleBinding = "./definitions/peer/rolebinding.yaml" + defaultFluentdConfigMap = "./definitions/peer/fluentd-configmap.yaml" +) + +type Override interface { + Deployment(v1.Object, *appsv1.Deployment, resources.Action) error + Service(v1.Object, *corev1.Service, resources.Action) error + PVC(v1.Object, *corev1.PersistentVolumeClaim, resources.Action) error + StateDBPVC(v1.Object, *corev1.PersistentVolumeClaim, resources.Action) error +} + +//go:generate counterfeiter -o mocks/deployment_manager.go -fake-name DeploymentManager . 
DeploymentManager + +type DeploymentManager interface { + resources.Manager + CheckForSecretChange(v1.Object, string, func(string, *appsv1.Deployment) bool) error + DeploymentStatus(v1.Object) (appsv1.DeploymentStatus, error) + GetScheme() *runtime.Scheme +} + +//go:generate counterfeiter -o mocks/initializer.go -fake-name InitializeIBPPeer . InitializeIBPPeer + +type InitializeIBPPeer interface { + GenerateOrdererCACertsSecret(instance *current.IBPPeer, certs map[string][]byte) error + GenerateSecrets(prefix commoninit.SecretType, instance v1.Object, crypto *commonconfig.Response) error + Create(initializer.CoreConfig, initializer.IBPPeer, string) (*initializer.Response, error) + Update(initializer.CoreConfig, initializer.IBPPeer) (*initializer.Response, error) + CheckIfAdminCertsUpdated(*current.IBPPeer) (bool, error) + UpdateAdminSecret(*current.IBPPeer) error + MissingCrypto(*current.IBPPeer) bool + GetInitPeer(instance *current.IBPPeer, storagePath string) (*initializer.Peer, error) + GetUpdatedPeer(instance *current.IBPPeer) (*initializer.Peer, error) + GenerateSecretsFromResponse(instance *current.IBPPeer, cryptoResponse *commonconfig.CryptoResponse) error + UpdateSecretsFromResponse(instance *current.IBPPeer, cryptoResponse *commonconfig.CryptoResponse) error + GetCrypto(instance *current.IBPPeer) (*commonconfig.CryptoResponse, error) + CoreConfigMap() *initializer.CoreConfigMap +} + +//go:generate counterfeiter -o mocks/certificate_manager.go -fake-name CertificateManager . CertificateManager + +type CertificateManager interface { + CheckCertificatesForExpire(instance v1.Object, numSecondsBeforeExpire int64) (current.IBPCRStatusType, string, error) + GetSignCert(string, string) ([]byte, error) + GetDurationToNextRenewal(commoninit.SecretType, v1.Object, int64) (time.Duration, error) + RenewCert(commoninit.SecretType, certificate.Instance, *current.EnrollmentSpec, *commonapi.BCCSP, string, bool, bool) error +} + +//go:generate counterfeiter -o mocks/restart_manager.go -fake-name RestartManager . RestartManager + +type RestartManager interface { + ForAdminCertUpdate(instance v1.Object) error + ForCertUpdate(certType commoninit.SecretType, instance v1.Object) error + ForConfigOverride(instance v1.Object) error + ForNodeOU(instance v1.Object) error + ForRestartAction(instance v1.Object) error + TriggerIfNeeded(instance restart.Instance) error +} + +//go:generate counterfeiter -o mocks/update.go -fake-name Update . 
Update +type Update interface { + SpecUpdated() bool + ConfigOverridesUpdated() bool + DindArgsUpdated() bool + TLSCertUpdated() bool + EcertUpdated() bool + PeerTagUpdated() bool + CertificateUpdated() bool + SetDindArgsUpdated(updated bool) + RestartNeeded() bool + EcertReenrollNeeded() bool + TLSReenrollNeeded() bool + EcertNewKeyReenroll() bool + TLScertNewKeyReenroll() bool + MigrateToV2() bool + MigrateToV24() bool + UpgradeDBs() bool + MSPUpdated() bool + EcertEnroll() bool + TLSCertEnroll() bool + CertificateCreated() bool + GetCreatedCertType() commoninit.SecretType + CryptoBackupNeeded() bool + NodeOUUpdated() bool + FabricVersionUpdated() bool + ImagesUpdated() bool +} + +type IBPPeer interface { + Initialize(instance *current.IBPPeer, update Update) error + CheckStates(instance *current.IBPPeer) error + PreReconcileChecks(instance *current.IBPPeer, update Update) (bool, error) + ReconcileManagers(instance *current.IBPPeer, update Update) error + Reconcile(instance *current.IBPPeer, update Update) (common.Result, error) +} + +type CoreConfig interface { + GetMaxNameLength() *int + GetAddressOverrides() []peerconfig.AddressOverride + GetBCCSPSection() *commonapi.BCCSP + MergeWith(interface{}, bool) error + SetPKCS11Defaults(bool) + ToBytes() ([]byte, error) + UsingPKCS11() bool + SetBCCSPLibrary(string) +} + +var _ IBPPeer = &Peer{} + +type Peer struct { + Client controllerclient.Client + Scheme *runtime.Scheme + Config *config.Config + + DeploymentManager DeploymentManager + ServiceManager resources.Manager + PVCManager resources.Manager + StateDBPVCManager resources.Manager + FluentDConfigMapManager resources.Manager + RoleManager resources.Manager + RoleBindingManager resources.Manager + ServiceAccountManager resources.Manager + + Override Override + Initializer InitializeIBPPeer + + CertificateManager CertificateManager + RenewCertTimers map[string]*time.Timer + + Restart RestartManager +} + +func New(client controllerclient.Client, scheme *runtime.Scheme, config *config.Config, o Override) *Peer { + p := &Peer{ + Client: client, + Scheme: scheme, + Config: config, + Override: o, + } + + p.CreateManagers() + + validator := &validator.Validator{ + Client: client, + } + + init := initializer.New(config.PeerInitConfig, scheme, client, p.GetLabels, validator, config.Operator.Peer.Timeouts.EnrollJob) + p.Initializer = init + + p.CertificateManager = certificate.New(client, scheme) + p.RenewCertTimers = make(map[string]*time.Timer) + + p.Restart = restart.New(client, config.Operator.Restart.WaitTime.Get(), config.Operator.Restart.Timeout.Get()) + + return p +} + +func (p *Peer) CreateManagers() { + override := p.Override + resourceManager := resourcemanager.New(p.Client, p.Scheme) + peerConfig := p.Config.PeerInitConfig + + p.DeploymentManager = resourceManager.CreateDeploymentManager("", override.Deployment, p.GetLabels, peerConfig.DeploymentFile) + p.PVCManager = resourceManager.CreatePVCManager("", override.PVC, p.GetLabels, peerConfig.PVCFile) + p.StateDBPVCManager = resourceManager.CreatePVCManager("statedb", override.StateDBPVC, p.GetLabels, peerConfig.CouchDBPVCFile) + p.FluentDConfigMapManager = resourceManager.CreateConfigMapManager("fluentd", nil, p.GetLabels, peerConfig.FluentdConfigMapFile, nil) + p.RoleManager = resourceManager.CreateRoleManager("", nil, p.GetLabels, peerConfig.RoleFile) + p.RoleBindingManager = resourceManager.CreateRoleBindingManager("", nil, p.GetLabels, peerConfig.RoleBindingFile) + p.ServiceAccountManager = 
resourceManager.CreateServiceAccountManager("", nil, p.GetLabels, peerConfig.ServiceAccountFile) + p.ServiceManager = resourceManager.CreateServiceManager("", override.Service, p.GetLabels, peerConfig.ServiceFile) +} + +func (p *Peer) PreReconcileChecks(instance *current.IBPPeer, update Update) (bool, error) { + var maxNameLength *int + + imagesUpdated, err := reconcilechecks.FabricVersionHelper(instance, p.Config.Operator.Versions, update) + if err != nil { + return false, errors.Wrap(err, "failed to during version and image checks") + } + + co, err := instance.GetConfigOverride() + if err != nil { + return false, err + } + + configOverride := co.(CoreConfig) + maxNameLength = configOverride.GetMaxNameLength() + + err = util.ValidationChecks(instance.TypeMeta, instance.ObjectMeta, "IBPPeer", maxNameLength) + if err != nil { + return false, err + } + + if instance.Spec.Action.Enroll.Ecert && instance.Spec.Action.Reenroll.Ecert { + return false, errors.New("both enroll and renenroll action requested for ecert, must only select one") + } + + if instance.Spec.Action.Enroll.TLSCert && instance.Spec.Action.Reenroll.TLSCert { + return false, errors.New("both enroll and renenroll action requested for TLS cert, must only select one") + } + + if instance.Spec.Action.Reenroll.Ecert && instance.Spec.Action.Reenroll.EcertNewKey { + return false, errors.New("both reenroll and renenroll with new action requested for ecert, must only select one") + } + + if instance.Spec.Action.Reenroll.TLSCert && instance.Spec.Action.Reenroll.TLSCertNewKey { + return false, errors.New("both reenroll and renenroll with new action requested for TLS cert, must only select one") + } + + if instance.Spec.HSMSet() { + err = util.ValidateHSMProxyURL(instance.Spec.HSM.PKCS11Endpoint) + if err != nil { + return false, errors.Wrapf(err, "invalid HSM endpoint for peer instance '%s'", instance.GetName()) + } + } + + hsmImageUpdated := p.ReconcileHSMImages(instance) + + if !instance.Spec.DomainSet() { + return false, fmt.Errorf("domain not set for peer instance '%s'", instance.GetName()) + } + + zoneUpdated, err := p.SelectZone(instance) + if err != nil { + return false, err + } + + regionUpdated, err := p.SelectRegion(instance) + if err != nil { + return false, err + } + + var replicasUpdated bool + if instance.Spec.Replicas == nil { + replicas := int32(1) + instance.Spec.Replicas = &replicas + replicasUpdated = true + } + + dbTypeUpdated := p.CheckDBType(instance) + updated := dbTypeUpdated || zoneUpdated || regionUpdated || update.DindArgsUpdated() || hsmImageUpdated || replicasUpdated || imagesUpdated + + if updated { + log.Info(fmt.Sprintf( + "dbTypeUpdate %t, zoneUpdated %t, regionUpdated %t, dindArgsUpdated %t, hsmImageUpdated %t, replicasUpdated %t, imagesUpdated %t", + dbTypeUpdated, + zoneUpdated, + regionUpdated, + update.DindArgsUpdated(), + hsmImageUpdated, + replicasUpdated, + imagesUpdated)) + } + + return updated, nil +} + +func (p *Peer) SetVersion(instance *current.IBPPeer) (bool, error) { + if instance.Status.Version == "" || !version.String(instance.Status.Version).Equal(version.Operator) { + log.Info("Version of Operator: ", "version", version.Operator) + log.Info("Version of CR: ", "version", instance.Status.Version) + log.Info(fmt.Sprintf("Updating CR '%s' to version '%s'", instance.Name, version.Operator)) + + instance.Status.Version = version.Operator + err := p.Client.PatchStatus(context.TODO(), instance, nil, controllerclient.PatchOption{ + Resilient: &controllerclient.ResilientPatch{ + Retry: 3, + Into: 
&current.IBPPeer{}, + Strategy: k8sclient.MergeFrom, + }, + }) + if err != nil { + return false, err + } + return true, nil + } + return false, nil +} + +func (p *Peer) Initialize(instance *current.IBPPeer, update Update) error { + var err error + + log.Info(fmt.Sprintf("Checking if peer '%s' needs initialization", instance.GetName())) + + // TODO: Add checks to determine if initialization is needed. Split this method into + // two: one should handle initialization during the create event of a CR and the other + // should handle update events + + // Service account is required by HSM init job + if err := p.ReconcilePeerRBAC(instance); err != nil { + return err + } + + if instance.IsHSMEnabled() { + // If HSM config not found, HSM proxy is being used + if instance.UsingHSMProxy() { + err = os.Setenv("PKCS11_PROXY_SOCKET", instance.Spec.HSM.PKCS11Endpoint) + if err != nil { + return err + } + } else { + hsmConfig, err := commonconfig.ReadHSMConfig(p.Client, instance) + if err != nil { + return errors.New("using non-proxy HSM, but no HSM config defined as config map 'ibp-hsm-config'") + } + + if hsmConfig.Daemon != nil { + log.Info("Using daemon based HSM, creating pvc...") + p.PVCManager.SetCustomName(instance.Spec.CustomNames.PVC.Peer) + err = p.PVCManager.Reconcile(instance, update.SpecUpdated()) + if err != nil { + return errors.Wrap(err, "failed PVC reconciliation") + } + } + } + } + + peerConfig := p.Config.PeerInitConfig.CorePeerFile + if version.GetMajorReleaseVersion(instance.Spec.FabricVersion) == version.V2 { + peerConfig = p.Config.PeerInitConfig.CorePeerV2File + } + + if instance.UsingHSMProxy() { + err = os.Setenv("PKCS11_PROXY_SOCKET", instance.Spec.HSM.PKCS11Endpoint) + if err != nil { + return err + } + } + + storagePath := p.GetInitStoragePath(instance) + initPeer, err := p.Initializer.GetInitPeer(instance, storagePath) + if err != nil { + return err + } + initPeer.UsingHSMProxy = instance.UsingHSMProxy() + initPeer.Config, err = initializer.GetCoreConfigFromFile(instance, peerConfig) + if err != nil { + return err + } + + updated := update.ConfigOverridesUpdated() || update.NodeOUUpdated() + if update.ConfigOverridesUpdated() { + err = p.InitializeUpdateConfigOverride(instance, initPeer) + if err != nil { + return err + } + // Request deployment restart for config override update + if err = p.Restart.ForConfigOverride(instance); err != nil { + return err + } + } + if update.NodeOUUpdated() { + err = p.InitializeUpdateNodeOU(instance) + if err != nil { + return err + } + // Request deployment restart for node OU update + if err = p.Restart.ForNodeOU(instance); err != nil { + return err + } + } + if !updated { + err = p.InitializeCreate(instance, initPeer) + if err != nil { + return err + } + } + + updateNeeded, err := p.Initializer.CheckIfAdminCertsUpdated(instance) + if err != nil { + return err + } + if updateNeeded { + err = p.Initializer.UpdateAdminSecret(instance) + if err != nil { + return err + } + // Request deployment restart for admin cert updates + if err = p.Restart.ForAdminCertUpdate(instance); err != nil { + return err + } + } + + return nil +} + +func (p *Peer) InitializeUpdateConfigOverride(instance *current.IBPPeer, initPeer *initializer.Peer) error { + var err error + + if p.Initializer.MissingCrypto(instance) { + // If crypto is missing, we should run the create logic + err := p.InitializeCreate(instance, initPeer) + if err != nil { + return err + } + + return nil + } + + log.Info(fmt.Sprintf("Initialize peer '%s' during update config override", instance.GetName())) + 
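+ // Merge flow (summary): read the current core.yaml from the peer's config map, apply the
+ // CR's config override on top of it via the initializer, write the merged config back to the
+ // config map, and refresh the '<instance>-orderercacerts' secret when the deliveryclient
+ // section carries orderer CA certs.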
+ cm, err := initializer.GetCoreFromConfigMap(p.Client, instance) + if err != nil { + return err + } + + initPeer.Config, err = initializer.GetCoreConfigFromBytes(instance, cm.BinaryData["core.yaml"]) + if err != nil { + return err + } + + co, err := instance.GetConfigOverride() + if err != nil { + return err + } + configOverrides := co.(CoreConfig) + + resp, err := p.Initializer.Update(configOverrides, initPeer) + if err != nil { + return err + } + + if resp != nil { + if resp.Config != nil { + // Update core.yaml in config map + err = p.Initializer.CoreConfigMap().CreateOrUpdate(instance, resp.Config) + if err != nil { + return err + } + } + + if len(resp.DeliveryClientCerts) > 0 { + log.Info(fmt.Sprintf("Orderer CA Certs detected in DeliveryClient config, creating secret '%s-orderercacerts' with certs", instance.Name)) + err = p.Initializer.GenerateOrdererCACertsSecret(instance, resp.DeliveryClientCerts) + if err != nil { + return err + } + } + } + + return nil +} + +func (p *Peer) InitializeUpdateNodeOU(instance *current.IBPPeer) error { + log.Info(fmt.Sprintf("Node OU updated with enabled: %t for peer '%s", !instance.Spec.NodeOUDisabled(), instance.GetName())) + + crypto, err := p.Initializer.GetCrypto(instance) + if err != nil { + return err + } + if !instance.Spec.NodeOUDisabled() { + if err := crypto.VerifyCertOU("peer"); err != nil { + return err + } + } else { + // If nodeOUDisabled, admin certs are required + if crypto.Enrollment.AdminCerts == nil { + return errors.New("node OU disabled, admin certs are required but missing") + } + } + + // Update config.yaml in config map + err = p.Initializer.CoreConfigMap().AddNodeOU(instance) + if err != nil { + return err + } + + return nil +} + +func (p *Peer) InitializeCreate(instance *current.IBPPeer, initPeer *initializer.Peer) error { + var err error + + if p.ConfigExists(instance) { + log.Info(fmt.Sprintf("Config '%s-config' exists, not reinitializing peer", instance.GetName())) + return nil + } + + log.Info(fmt.Sprintf("Initialize peer '%s' during create", instance.GetName())) + + storagePath := p.GetInitStoragePath(instance) + + co, err := instance.GetConfigOverride() + if err != nil { + return err + } + configOverrides := co.(CoreConfig) + + resp, err := p.Initializer.Create(configOverrides, initPeer, storagePath) + if err != nil { + return err + } + + if resp != nil { + if resp.Crypto != nil { + if !instance.Spec.NodeOUDisabled() { + if err := resp.Crypto.VerifyCertOU("peer"); err != nil { + return err + } + } + + err = p.Initializer.GenerateSecretsFromResponse(instance, resp.Crypto) + if err != nil { + return err + } + } + + if len(resp.DeliveryClientCerts) > 0 { + log.Info(fmt.Sprintf("Orderer CA Certs detected in DeliverClient config, creating secret '%s-orderercacerts' with certs", instance.Name)) + err = p.Initializer.GenerateOrdererCACertsSecret(instance, resp.DeliveryClientCerts) + if err != nil { + return err + } + } + + if resp.Config != nil { + if instance.IsHSMEnabled() && !instance.UsingHSMProxy() { + hsmConfig, err := commonconfig.ReadHSMConfig(p.Client, instance) + if err != nil { + return err + } + resp.Config.SetBCCSPLibrary(filepath.Join("/hsm/lib", filepath.Base(hsmConfig.Library.FilePath))) + } + + err = p.Initializer.CoreConfigMap().CreateOrUpdate(instance, resp.Config) + if err != nil { + return err + } + } + } + + return nil +} + +func (p *Peer) Reconcile(instance *current.IBPPeer, update Update) (common.Result, error) { + var err error + var status *current.CRStatus + + versionSet, err := 
p.SetVersion(instance) + if err != nil { + return common.Result{}, errors.Wrap(err, fmt.Sprintf("failed updating CR '%s' to version '%s'", instance.Name, version.Operator)) + } + if versionSet { + log.Info("Instance version updated, requeuing request...") + return common.Result{ + Result: reconcile.Result{ + Requeue: true, + }, + }, nil + } + + instanceUpdated, err := p.PreReconcileChecks(instance, update) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed pre reconcile checks") + } + + // We do not have to wait for the service to get the external endpoint, + // thus we call UpdateExternalEndpoint before reconciling the managers + externalEndpointUpdated := p.UpdateExternalEndpoint(instance) + + if instanceUpdated || externalEndpointUpdated { + log.Info(fmt.Sprintf("Updating instance after pre reconcile checks: %t, updating external endpoint: %t", instanceUpdated, externalEndpointUpdated)) + err = p.Client.Patch(context.TODO(), instance, nil, controllerclient.PatchOption{ + Resilient: &controllerclient.ResilientPatch{ + Retry: 3, + Into: &current.IBPPeer{}, + Strategy: k8sclient.MergeFrom, + }, + }) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to update instance") + } + + log.Info("Instance updated, requeuing request...") + return common.Result{ + Result: reconcile.Result{ + Requeue: true, + }, + }, nil + } + + err = p.Initialize(instance, update) + if err != nil { + return common.Result{}, operatorerrors.Wrap(err, operatorerrors.PeerInitilizationFailed, "failed to initialize peer") + } + + err = p.ReconcileManagers(instance, update) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to reconcile managers") + } + + err = p.UpdateConnectionProfile(instance) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to create connection profile") + } + + err = p.CheckStates(instance) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to check and restore state") + } + + // Custom product logic can be implemented here (currently a no-op) + status, result, err := p.CustomLogic(instance, update) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to run custom offering logic") + } + if result != nil { + log.Info(fmt.Sprintf("Finished reconciling '%s' with Custom Logic result", instance.GetName())) + return *result, nil + } + + if update.MSPUpdated() { + err = p.UpdateMSPCertificates(instance) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to update certificates passed in MSP spec") + } + // A successful update will trigger a tlsCertUpdated or ecertUpdated event, which will handle restarting the deployment + } + + if update.EcertUpdated() { + log.Info("Ecert was updated") + // Request deployment restart for ecert update + err = p.Restart.ForCertUpdate(commoninit.ECERT, instance) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to update restart config") + } + } + + if update.TLSCertUpdated() { + log.Info("TLS cert was updated") + // Request deployment restart for TLS cert update + err = p.Restart.ForCertUpdate(commoninit.TLS, instance) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to update restart config") + } + } + + if err := p.HandleActions(instance, update); err != nil { + return common.Result{}, err + } + + if err := p.HandleRestart(instance, update); err != nil { + return common.Result{}, err + } + + return common.Result{ + Status: status, + }, nil +} + +func (p *Peer) ReconcileManagers(instance *current.IBPPeer, updated Update) error 
{ + var err error + + update := updated.SpecUpdated() + + p.PVCManager.SetCustomName(instance.Spec.CustomNames.PVC.Peer) + err = p.PVCManager.Reconcile(instance, update) + if err != nil { + return errors.Wrap(err, "failed PVC reconciliation") + } + + p.StateDBPVCManager.SetCustomName(instance.Spec.CustomNames.PVC.StateDB) + err = p.StateDBPVCManager.Reconcile(instance, update) + if err != nil { + return errors.Wrap(err, "failed CouchDB PVC reconciliation") + } + + err = p.ReconcileSecret(instance) + if err != nil { + return errors.Wrap(err, "failed Secret reconciliation") + } + + err = p.ServiceManager.Reconcile(instance, update) + if err != nil { + return errors.Wrap(err, "failed Service reconciliation") + } + + err = p.DeploymentManager.Reconcile(instance, update) + if err != nil { + return errors.Wrap(err, "failed Deployment reconciliation") + } + + err = p.ReconcilePeerRBAC(instance) + if err != nil { + return errors.Wrap(err, "failed RBAC reconciliation") + } + + err = p.FluentDConfigMapManager.Reconcile(instance, update) + if err != nil { + return errors.Wrap(err, "failed FluentD ConfigMap reconciliation") + } + + return nil +} + +func (p *Peer) ReconcilePeerRBAC(instance *current.IBPPeer) error { + var err error + + err = p.RoleManager.Reconcile(instance, false) + if err != nil { + return err + } + + err = p.RoleBindingManager.Reconcile(instance, false) + if err != nil { + return err + } + + err = p.ServiceAccountManager.Reconcile(instance, false) + if err != nil { + return err + } + + return nil +} + +// this function makes sure the deployment spec matches the expected state +func (p *Peer) CheckStates(instance *current.IBPPeer) error { + if p.DeploymentManager.Exists(instance) { + err := p.DeploymentManager.CheckState(instance) + if err != nil { + log.Error(err, "unexpected state") + err = p.DeploymentManager.RestoreState(instance) + if err != nil { + return err + } + } + } + + return nil +} + +func (p *Peer) ReconcileSecret(instance *current.IBPPeer) error { + name := instance.Spec.MSPSecret + if name == "" { + name = instance.Name + "-secret" // default value for secret, if none specified + } + + secret := &corev1.Secret{} + err := p.Client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: instance.Namespace}, secret) + if err != nil { + if k8serrors.IsNotFound(err) { + log.Info(fmt.Sprintf("Creating secret '%s'", name)) + createErr := p.CreateSecret(instance) + if createErr != nil { + return createErr + } + return nil + } + return err + } + + // TODO: If needed, update logic goes here + + return nil +} + +func (p *Peer) CreateSecret(instance *current.IBPPeer) error { + secret := &corev1.Secret{} + secret.Name = instance.Spec.MSPSecret + if secret.Name == "" { + secret.Name = instance.Name + "-secret" // default value for secret, if none specified + } + secret.Namespace = instance.Namespace + secret.Labels = p.GetLabels(instance) + + secretData := instance.Spec.Secret + bytesData, err := json.Marshal(secretData) + if err != nil { + return err + } + secret.Data = make(map[string][]byte) + secret.Data["secret.json"] = bytesData + + err = p.Client.Create(context.TODO(), secret, controllerclient.CreateOption{Owner: instance, Scheme: p.Scheme}) + if err != nil { + return err + } + + return nil +} + +func (p *Peer) UpdateExternalEndpoint(instance *current.IBPPeer) bool { + if instance.Spec.PeerExternalEndpoint == "" { + instance.Spec.PeerExternalEndpoint = instance.Namespace + "-" + instance.Name + "-peer." 
+ instance.Spec.Domain + ":443" + return true + } + return false +} + +func (p *Peer) SelectZone(instance *current.IBPPeer) (bool, error) { + if instance.Spec.Zone == "select" { + zone := util.GetZone(p.Client) + instance.Spec.Zone = zone + log.Info(fmt.Sprintf("Setting zone to '%s', and updating spec", zone)) + return true, nil + } + if instance.Spec.Zone != "" { + err := util.ValidateZone(p.Client, instance.Spec.Zone) + if err != nil { + return false, err + } + } + return false, nil +} + +func (p *Peer) SelectRegion(instance *current.IBPPeer) (bool, error) { + if instance.Spec.Region == "select" { + region := util.GetRegion(p.Client) + instance.Spec.Region = region + log.Info(fmt.Sprintf("Setting region to '%s', and updating spec", region)) + return true, nil + } + if instance.Spec.Region != "" { + err := util.ValidateRegion(p.Client, instance.Spec.Region) + if err != nil { + return false, err + } + } + return false, nil +} + +func (p *Peer) CheckDBType(instance *current.IBPPeer) bool { + if instance.Spec.StateDb == "" { + log.Info("Setting statedb type to 'CouchDB', and updating spec") + instance.Spec.StateDb = "CouchDB" + return true + } + + return false +} + +func (p *Peer) GetLabels(instance v1.Object) map[string]string { + label := os.Getenv("OPERATOR_LABEL_PREFIX") + if label == "" { + label = "fabric" + } + + i := instance.(*current.IBPPeer) + return map[string]string{ + "app": instance.GetName(), + "creator": label, + "orgname": i.Spec.MSPID, + "app.kubernetes.io/name": label, + "app.kubernetes.io/instance": label + "peer", + "app.kubernetes.io/managed-by": label + "-operator", + } +} + +func (p *Peer) UpdateConnectionProfile(instance *current.IBPPeer) error { + var err error + + endpoints := p.GetEndpoints(instance) + + tlscert, err := common.GetTLSSignCertEncoded(p.Client, instance) + if err != nil { + return err + } + + tlscacerts, err := common.GetTLSCACertEncoded(p.Client, instance) + if err != nil { + return err + } + + tlsintercerts, err := common.GetTLSIntercertEncoded(p.Client, instance) + if err != nil { + return err + } + + ecert, err := common.GetEcertSignCertEncoded(p.Client, instance) + if err != nil { + return err + } + + cacert, err := common.GetEcertCACertEncoded(p.Client, instance) + if err != nil { + return err + } + + admincerts, err := common.GetEcertAdmincertEncoded(p.Client, instance) + if err != nil { + return err + } + + if len(tlsintercerts) > 0 { + tlscacerts = tlsintercerts + } + + err = p.UpdateConnectionProfileConfigmap(instance, *endpoints, tlscert, tlscacerts, ecert, cacert, admincerts) + if err != nil { + return err + } + + return nil +} + +func (p *Peer) UpdateConnectionProfileConfigmap(instance *current.IBPPeer, endpoints current.PeerEndpoints, tlscert string, tlscacerts []string, ecert string, cacert []string, admincerts []string) error { + // TODO add ecert.intermediatecerts and ecert.admincerts + // TODO add tls.cacerts + // TODO get the whole PeerConnectionProfile object from caller?? 
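+ // The profile below is marshalled to JSON and stored under the 'profile.json' key of the
+ // '<instance>-connection-profile' config map; if the config map already exists it is updated
+ // in place, otherwise it is created with the peer instance set as owner.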
+ name := instance.Name + "-connection-profile" + connectionProfile := &current.PeerConnectionProfile{ + Endpoints: endpoints, + TLS: &current.MSP{ + SignCerts: tlscert, + CACerts: tlscacerts, + }, + Component: &current.MSP{ + SignCerts: ecert, + CACerts: cacert, + AdminCerts: admincerts, + }, + } + + bytes, err := json.Marshal(connectionProfile) + if err != nil { + return errors.Wrap(err, "failed to marshal connection profile") + } + cm := &corev1.ConfigMap{ + BinaryData: map[string][]byte{"profile.json": bytes}, + } + cm.Name = name + cm.Namespace = instance.Namespace + cm.Labels = p.GetLabels(instance) + + nn := types.NamespacedName{ + Name: name, + Namespace: instance.GetNamespace(), + } + + err = p.Client.Get(context.TODO(), nn, &corev1.ConfigMap{}) + if err == nil { + log.Info(fmt.Sprintf("Updating connection profile configmap for %s", instance.Name)) + err = p.Client.Update(context.TODO(), cm, controllerclient.UpdateOption{ + Owner: instance, + Scheme: p.Scheme, + }) + if err != nil { + return errors.Wrap(err, "failed to update connection profile configmap") + } + } else { + log.Info(fmt.Sprintf("Creating connection profile configmap for %s", instance.Name)) + err = p.Client.Create(context.TODO(), cm, controllerclient.CreateOption{ + Owner: instance, + Scheme: p.Scheme, + }) + if err != nil { + return errors.Wrap(err, "failed to create connection profile configmap") + } + } + return nil +} + +func (p *Peer) GetEndpoints(instance *current.IBPPeer) *current.PeerEndpoints { + endpoints := &current.PeerEndpoints{ + API: "grpcs://" + instance.Namespace + "-" + instance.Name + "-peer." + instance.Spec.Domain + ":443", + Operations: "https://" + instance.Namespace + "-" + instance.Name + "-operations." + instance.Spec.Domain + ":443", + Grpcweb: "https://" + instance.Namespace + "-" + instance.Name + "-grpcweb." 
+ instance.Spec.Domain + ":443", + } + return endpoints +} + +func (p *Peer) ConfigExists(instance *current.IBPPeer) bool { + name := fmt.Sprintf("%s-config", instance.GetName()) + namespacedName := types.NamespacedName{ + Name: name, + Namespace: instance.Namespace, + } + + cm := &corev1.ConfigMap{} + err := p.Client.Get(context.TODO(), namespacedName, cm) + if err != nil { + return false + } + + return true +} + +func (p *Peer) CheckCSRHosts(instance *current.IBPPeer, hosts []string) bool { + if instance.Spec.Secret != nil { + if instance.Spec.Secret.Enrollment != nil { + if instance.Spec.Secret.Enrollment.TLS == nil { + instance.Spec.Secret.Enrollment.TLS = &current.Enrollment{} + } + if instance.Spec.Secret.Enrollment.TLS.CSR == nil { + instance.Spec.Secret.Enrollment.TLS.CSR = &current.CSR{} + instance.Spec.Secret.Enrollment.TLS.CSR.Hosts = hosts + return true + } else { + originalLength := len(instance.Spec.Secret.Enrollment.TLS.CSR.Hosts) + for _, host := range instance.Spec.Secret.Enrollment.TLS.CSR.Hosts { + hosts = util.AppendStringIfMissing(hosts, host) + } + instance.Spec.Secret.Enrollment.TLS.CSR.Hosts = hosts + newLength := len(instance.Spec.Secret.Enrollment.TLS.CSR.Hosts) + return originalLength != newLength + } + } + } + return false +} + +func (p *Peer) GetBCCSPSectionForInstance(instance *current.IBPPeer) (*commonapi.BCCSP, error) { + var bccsp *commonapi.BCCSP + if instance.IsHSMEnabled() { + co, err := instance.GetConfigOverride() + if err != nil { + return nil, errors.Wrap(err, "failed to get configoverride") + } + + configOverride := co.(CoreConfig) + configOverride.SetPKCS11Defaults(instance.UsingHSMProxy()) + bccsp = configOverride.GetBCCSPSection() + } + + return bccsp, nil +} + +func (p *Peer) GetInitStoragePath(instance *current.IBPPeer) string { + if p.Config != nil && p.Config.PeerInitConfig != nil && p.Config.PeerInitConfig.StoragePath != "" { + return filepath.Join(p.Config.PeerInitConfig.StoragePath, instance.GetName()) + } + + return filepath.Join("/", "peerinit", instance.GetName()) +} + +func (p *Peer) ReconcileFabricPeerMigrationV1_4(instance *current.IBPPeer) error { + peerConfig, err := p.FabricPeerMigrationV1_4(instance) + if err != nil { + return errors.Wrap(err, "failed to migrate peer between fabric versions") + } + + if peerConfig != nil { + log.Info("Peer config updated during fabric peer migration, updating config map...") + if err := p.Initializer.CoreConfigMap().CreateOrUpdate(instance, peerConfig); err != nil { + return errors.Wrapf(err, "failed to create/update '%s' peer's config map", instance.GetName()) + } + } + + return nil +} + +// Moving to a fabric version above 1.4.6 requires that the `msp/keystore` value be removed +// from the BCCSP section if configured to use PKCS11 (HSM). 
NOTE: This does not support +// migration across major release, will not cover migration peer from 1.4.x to 2.x +func (p *Peer) FabricPeerMigrationV1_4(instance *current.IBPPeer) (*peerconfig.Core, error) { + if !instance.IsHSMEnabled() { + return nil, nil + } + + peerTag := instance.Spec.Images.PeerTag + if !strings.Contains(peerTag, "sha") { + tag := strings.Split(peerTag, "-")[0] + + peerVersion := version.String(tag) + if !peerVersion.GreaterThan(version.V1_4_6) { + return nil, nil + } + + log.Info(fmt.Sprintf("Peer moving to fabric version %s", peerVersion)) + } else { + if version.GetMajorReleaseVersion(instance.Spec.FabricVersion) == version.V2 { + return nil, nil + } + log.Info(fmt.Sprintf("Peer moving to digest %s", peerTag)) + } + + // Read peer config map and remove keystore value from BCCSP section + cm, err := initializer.GetCoreFromConfigMap(p.Client, instance) + if err != nil { + return nil, errors.Wrapf(err, "failed to get '%s' peer's config map", instance.GetName()) + } + + peerConfig := &peerconfig.Core{} + if err := yaml.Unmarshal(cm.BinaryData["core.yaml"], peerConfig); err != nil { + return nil, errors.Wrap(err, "invalid peer config") + } + + // If already nil, don't need to proceed further as config updates are not required + if peerConfig.Peer.BCCSP.PKCS11.FileKeyStore == nil { + return nil, nil + } + + peerConfig.Peer.BCCSP.PKCS11.FileKeyStore = nil + + return peerConfig, nil +} + +func (p *Peer) ReconcileFabricPeerMigrationV2_0(instance *current.IBPPeer) error { + log.Info("Migration to V2 requested, checking if migration is needed") + + migrator := &v2.Migrate{ + DeploymentManager: p.DeploymentManager, + ConfigMapManager: &initializer.CoreConfigMap{Config: p.Config.PeerInitConfig, Scheme: p.Scheme, GetLabels: p.GetLabels, Client: p.Client}, + Client: p.Client, + } + + if err := fabric.V2Migrate(instance, migrator, instance.Spec.FabricVersion, p.Config.Operator.Peer.Timeouts.DBMigration); err != nil { + return err + } + + return nil +} + +func (p *Peer) ReconcileFabricPeerMigrationV2_4(instance *current.IBPPeer) error { + log.Info("Migration to V2.4.x requested, checking if migration is needed") + + migrator := &v2.Migrate{ + DeploymentManager: p.DeploymentManager, + ConfigMapManager: &initializer.CoreConfigMap{Config: p.Config.PeerInitConfig, Scheme: p.Scheme, GetLabels: p.GetLabels, Client: p.Client}, + Client: p.Client, + } + + if err := fabric.V24Migrate(instance, migrator, instance.Spec.FabricVersion, p.Config.Operator.Peer.Timeouts.DBMigration); err != nil { + return err + } + + return nil +} + +func (p *Peer) HandleMigrationJobs(listOpt k8sclient.ListOption, instance *current.IBPPeer) (bool, error) { + status, job, err := p.CheckForRunningJobs(listOpt) + if err != nil { + return false, err + } + + switch status { + case RUNNING: + return true, nil + case COMPLETED: + jobName := job.GetName() + log.Info(fmt.Sprintf("Migration job '%s' completed, cleaning up...", jobName)) + + migrationJob := &batchv1.Job{ + ObjectMeta: v1.ObjectMeta{ + Name: jobName, + Namespace: instance.GetNamespace(), + }, + } + + if err := p.Client.Delete(context.TODO(), migrationJob); err != nil { + return false, errors.Wrap(err, "failed to delete migration job after completion") + } + + // TODO: Need to investigate why job is not adding controller reference to job pod, + // this manual cleanup should not be required + podList := &corev1.PodList{} + if err := p.Client.List(context.TODO(), podList, k8sclient.MatchingLabels{"job-name": jobName}); err != nil { + return false, 
errors.Wrap(err, "failed to list db migraton pods") + } + + if len(podList.Items) == 1 { + if err := p.Client.Delete(context.TODO(), &podList.Items[0]); err != nil { + return false, errors.Wrap(err, "failed to delete db migration pod") + } + } + + if instance.UsingCouchDB() { + couchDBPod := &corev1.Pod{ + ObjectMeta: v1.ObjectMeta{ + Name: fmt.Sprintf("%s-couchdb", instance.GetName()), + Namespace: instance.GetNamespace(), + }, + } + + if err := p.Client.Delete(context.TODO(), couchDBPod); err != nil { + return false, errors.Wrap(err, "failed to delete couchdb pod") + } + } + + return false, nil + default: + return false, nil + } +} + +type JobStatus string + +const ( + COMPLETED JobStatus = "completed" + RUNNING JobStatus = "running" + NOTFOUND JobStatus = "not-found" + UNKNOWN JobStatus = "unknown" +) + +func (p *Peer) CheckForRunningJobs(listOpt k8sclient.ListOption) (JobStatus, *jobv1.Job, error) { + jobList := &batchv1.JobList{} + if err := p.Client.List(context.TODO(), jobList, listOpt); err != nil { + return NOTFOUND, nil, nil + } + + if len(jobList.Items) == 0 { + return NOTFOUND, nil, nil + } + + // There should only be one job that is triggered per migration request + k8sJob := jobList.Items[0] + job := jobv1.NewWithDefaultsUseExistingName(&k8sJob) + + if len(job.Job.Status.Conditions) > 0 { + cond := job.Job.Status.Conditions[0] + if cond.Type == batchv1.JobFailed { + log.Info(fmt.Sprintf("Job '%s' failed for reason: %s: %s", job.Name, cond.Reason, cond.Message)) + } + } + + completed, err := job.ContainerFinished(p.Client, "dbmigration") + if err != nil { + return UNKNOWN, nil, err + } + + if completed { + return COMPLETED, job, nil + + } + + return RUNNING, nil, nil +} + +func (p *Peer) UpgradeDBs(instance *current.IBPPeer) error { + log.Info("Upgrade DBs action requested") + if err := action.UpgradeDBs(p.DeploymentManager, p.Client, instance, p.Config.Operator.Peer.Timeouts.DBMigration); err != nil { + return errors.Wrap(err, "failed to reset peer") + } + orig := instance.DeepCopy() + + instance.Spec.Action.UpgradeDBs = false + if err := p.Client.Patch(context.TODO(), instance, k8sclient.MergeFrom(orig)); err != nil { + return errors.Wrap(err, "failed to reset reenroll action flag") + } + + return nil +} + +func (p *Peer) EnrollForEcert(instance *current.IBPPeer) error { + log.Info(fmt.Sprintf("Ecert enroll triggered via action parameter for '%s'", instance.GetName())) + + secret := instance.Spec.Secret + if secret == nil || secret.Enrollment == nil || secret.Enrollment.Component == nil { + return errors.New("unable to enroll, no ecert enrollment information provided") + } + ecertSpec := secret.Enrollment.Component + + storagePath := filepath.Join(p.GetInitStoragePath(instance), "ecert") + crypto, err := action.Enroll(instance, ecertSpec, storagePath, p.Client, p.Scheme, true, p.Config.Operator.Peer.Timeouts.EnrollJob) + if err != nil { + return errors.Wrap(err, "failed to enroll for ecert") + } + + err = p.Initializer.GenerateSecrets("ecert", instance, crypto) + if err != nil { + return errors.Wrap(err, "failed to generate ecert secrets") + } + + return nil +} + +func (p *Peer) EnrollForTLSCert(instance *current.IBPPeer) error { + log.Info(fmt.Sprintf("TLS cert enroll triggered via action parameter for '%s'", instance.GetName())) + + secret := instance.Spec.Secret + if secret == nil || secret.Enrollment == nil || secret.Enrollment.TLS == nil { + return errors.New("unable to enroll, no TLS enrollment information provided") + } + tlscertSpec := secret.Enrollment.TLS + + 
storagePath := filepath.Join(p.GetInitStoragePath(instance), "tls") + crypto, err := action.Enroll(instance, tlscertSpec, storagePath, p.Client, p.Scheme, false, p.Config.Operator.Peer.Timeouts.EnrollJob) + if err != nil { + return errors.Wrap(err, "failed to enroll for TLS cert") + } + + err = p.Initializer.GenerateSecrets("tls", instance, crypto) + if err != nil { + return errors.Wrap(err, "failed to generate ecert secrets") + } + + return nil +} + +func (p *Peer) ReconcileHSMImages(instance *current.IBPPeer) bool { + hsmConfig, err := commonconfig.ReadHSMConfig(p.Client, instance) + if err != nil { + return false + } + + if hsmConfig.Library.AutoUpdateDisabled { + return false + } + + updated := false + if hsmConfig.Library.Image != "" { + hsm := strings.Split(hsmConfig.Library.Image, ":") + image := hsm[0] + tag := hsm[1] + + if instance.Spec.Images.HSMImage != image { + instance.Spec.Images.HSMImage = image + updated = true + } + + if instance.Spec.Images.HSMTag != tag { + instance.Spec.Images.HSMTag = tag + updated = true + } + } + + return updated +} + +func (p *Peer) HandleActions(instance *current.IBPPeer, update Update) error { + orig := instance.DeepCopy() + + if update.EcertReenrollNeeded() { + if err := p.ReenrollEcert(instance); err != nil { + log.Error(err, "Resetting action flag on failure") + instance.ResetEcertReenroll() + return err + } + instance.ResetEcertReenroll() + } + + if update.TLSReenrollNeeded() { + if err := p.ReenrollTLSCert(instance); err != nil { + log.Error(err, "Resetting action flag on failure") + instance.ResetTLSReenroll() + return err + } + instance.ResetTLSReenroll() + } + + if update.EcertNewKeyReenroll() { + if err := p.ReenrollEcertNewKey(instance); err != nil { + log.Error(err, "Resetting action flag on failure") + instance.ResetEcertReenroll() + return err + } + instance.ResetEcertReenroll() + } + + if update.TLScertNewKeyReenroll() { + if err := p.ReenrollTLSCertNewKey(instance); err != nil { + log.Error(err, "Resetting action flag on failure") + instance.ResetTLSReenroll() + return err + } + instance.ResetTLSReenroll() + } + + if update.EcertEnroll() { + if err := p.EnrollForEcert(instance); err != nil { + log.Error(err, "Resetting action flag on failure") + instance.ResetEcertEnroll() + return err + } + instance.ResetEcertEnroll() + } + + if update.TLSCertEnroll() { + if err := p.EnrollForTLSCert(instance); err != nil { + log.Error(err, "Resetting action flag on failure") + instance.ResetTLSEnroll() + return err + } + instance.ResetTLSEnroll() + } + + // Upgrade DBs needs to be one of the last thing that should be performed to allow for other + // update flags to be processed + if update.UpgradeDBs() { + if err := p.UpgradeDBs(instance); err != nil { + // not adding reset as this action should not be run twice + // log.Error(err, "Resetting action flag on failure") + return err + } + // Can return without continuing down to restart logic cause resetting a peer will + // initiate a restart anyways + instance.ResetUpgradeDBs() + + } else if update.RestartNeeded() { + if err := p.RestartAction(instance); err != nil { + log.Error(err, "Resetting action flag on failure") + instance.ResetRestart() + return err + } + instance.ResetRestart() + } + + if err := p.Client.Patch(context.TODO(), instance, k8sclient.MergeFrom(orig)); err != nil { + return errors.Wrap(err, "failed to reset action flags") + } + + return nil +} + +func (p *Peer) ReenrollEcert(instance *current.IBPPeer) error { + log.Info("Ecert reenroll triggered via action parameter") + if 
err := p.reenrollCert(instance, commoninit.ECERT, false); err != nil { + return errors.Wrap(err, "ecert reenroll reusing existing private key action failed") + } + return nil +} + +func (p *Peer) ReenrollEcertNewKey(instance *current.IBPPeer) error { + log.Info("Ecert with new key reenroll triggered via action parameter") + if err := p.reenrollCert(instance, commoninit.ECERT, true); err != nil { + return errors.Wrap(err, "ecert reenroll with new key action failed") + } + return nil +} + +func (p *Peer) ReenrollTLSCert(instance *current.IBPPeer) error { + log.Info("TLS reenroll triggered via action parameter") + if err := p.reenrollCert(instance, commoninit.TLS, false); err != nil { + return errors.Wrap(err, "tls reenroll reusing existing private key action failed") + } + return nil +} + +func (p *Peer) ReenrollTLSCertNewKey(instance *current.IBPPeer) error { + log.Info("TLS with new key reenroll triggered via action parameter") + if err := p.reenrollCert(instance, commoninit.TLS, true); err != nil { + return errors.Wrap(err, "tls reenroll with new key action failed") + } + return nil +} + +func (p *Peer) reenrollCert(instance *current.IBPPeer, certType commoninit.SecretType, newKey bool) error { + return action.Reenroll(p, p.Client, certType, instance, newKey) +} + +func (p *Peer) RestartAction(instance *current.IBPPeer) error { + log.Info("Restart triggered via action parameter") + if err := p.Restart.ForRestartAction(instance); err != nil { + return errors.Wrap(err, "failed to restart peer pods") + } + return nil +} + +func (p *Peer) HandleRestart(instance *current.IBPPeer, update Update) error { + // If restart is disabled for components, can return immediately + if p.Config.Operator.Restart.Disable.Components { + return nil + } + + err := p.Restart.TriggerIfNeeded(instance) + if err != nil { + return errors.Wrap(err, "failed to restart deployment") + } + + return nil +} + +func (p *Peer) UpdateMSPCertificates(instance *current.IBPPeer) error { + log.Info("Updating certificates passed in MSP spec") + + updatedPeer, err := p.Initializer.GetUpdatedPeer(instance) + if err != nil { + return err + } + + crypto, err := updatedPeer.GenerateCrypto() + if err != nil { + return err + } + + if crypto != nil { + err = p.Initializer.UpdateSecretsFromResponse(instance, crypto) + if err != nil { + return err + } + } + + return nil +} + +func (p *Peer) RenewCert(certType commoninit.SecretType, obj runtime.Object, newKey bool) error { + instance := obj.(*current.IBPPeer) + if instance.Spec.Secret == nil { + return errors.New(fmt.Sprintf("missing secret spec for instance '%s'", instance.GetName())) + } + + if instance.Spec.Secret.Enrollment != nil { + log.Info(fmt.Sprintf("Renewing %s certificate for instance '%s'", string(certType), instance.Name)) + + hsmEnabled := instance.IsHSMEnabled() + storagePath := p.GetInitStoragePath(instance) + spec := instance.Spec.Secret.Enrollment + bccsp, err := p.GetBCCSPSectionForInstance(instance) + if err != nil { + return err + } + + err = p.CertificateManager.RenewCert(certType, instance, spec, bccsp, storagePath, hsmEnabled, newKey) + if err != nil { + return err + } + } else { + return errors.New("cannot auto-renew certificate created by MSP, force renewal required") + } + + return nil +} + +func (p *Peer) CustomLogic(instance *current.IBPPeer, update Update) (*current.CRStatus, *common.Result, error) { + var status *current.CRStatus + var err error + return status, nil, err +} diff --git a/pkg/offering/base/peer/peer_suite_test.go 
b/pkg/offering/base/peer/peer_suite_test.go new file mode 100644 index 00000000..1c54df97 --- /dev/null +++ b/pkg/offering/base/peer/peer_suite_test.go @@ -0,0 +1,46 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package basepeer_test + +import ( + "net" + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestPeer(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Peer Suite") +} + +var ( + ln net.Listener +) + +var _ = BeforeSuite(func() { + var err error + ln, err = net.Listen("tcp", "0.0.0.0:2347") + Expect(err).NotTo(HaveOccurred()) +}) + +var _ = AfterSuite(func() { + ln.Close() +}) diff --git a/pkg/offering/base/peer/peer_test.go b/pkg/offering/base/peer/peer_test.go new file mode 100644 index 00000000..c48bcf63 --- /dev/null +++ b/pkg/offering/base/peer/peer_test.go @@ -0,0 +1,936 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package basepeer_test + +import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "encoding/pem" + "fmt" + "math/big" + "time" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + cmocks "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + commonapi "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/deployer" + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/peer/v1" + commonconfig "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/enroller" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/mspparser" + peerinit "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer" + pconfig "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer/config/v1" + managermocks "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/mocks" + basepeer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/peer" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/peer/mocks" + peermocks "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/peer/mocks" + "github.com/IBM-Blockchain/fabric-operator/pkg/operatorerrors" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + "github.com/IBM-Blockchain/fabric-operator/version" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + k8serror "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" +) + +var _ = Describe("Base Peer", func() { + var ( + peer *basepeer.Peer + instance *current.IBPPeer + mockKubeClient *cmocks.Client + cfg *config.Config + + deploymentMgr *peermocks.DeploymentManager + serviceMgr *managermocks.ResourceManager + pvcMgr *managermocks.ResourceManager + couchPvcMgr *managermocks.ResourceManager + configMapMgr *managermocks.ResourceManager + roleMgr *managermocks.ResourceManager + roleBindingMgr *managermocks.ResourceManager + serviceAccountMgr *managermocks.ResourceManager + + certificateMgr *peermocks.CertificateManager + initializer *peermocks.InitializeIBPPeer + update *mocks.Update + ) + + BeforeEach(func() { + mockKubeClient = &cmocks.Client{} + update = &mocks.Update{} + + replicas := int32(1) + instance = &current.IBPPeer{ + Spec: current.IBPPeerSpec{ + PeerExternalEndpoint: "address", + Domain: "domain", + HSM: &current.HSM{ + PKCS11Endpoint: "tcp://0.0.0.0:2347", + }, + StateDb: "couchdb", + Images: &current.PeerImages{ + PeerTag: "1.4.7-20200611", + }, + Replicas: &replicas, + FabricVersion: "1.4.9", + }, + } + instance.Kind = "IBPPeer" + instance.Name = "peer1" + instance.Namespace = "random" + + mockKubeClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj client.Object) error { + switch obj.(type) { + case *current.IBPPeer: + o := obj.(*current.IBPPeer) + o.Kind = "IBPPeer" + instance = o + case *corev1.Service: + o := obj.(*corev1.Service) + o.Spec.Type = corev1.ServiceTypeNodePort + o.Spec.Ports = append(o.Spec.Ports, corev1.ServicePort{ + Name: "peer-api", + TargetPort: intstr.IntOrString{ + IntVal: 7051, + }, + NodePort: int32(7051), + }) + case *corev1.Secret: + o := obj.(*corev1.Secret) + switch 
types.Name { + case "tls-" + instance.Name + "-signcert": + o.Name = "tls-" + instance.Name + "-signcert" + o.Namespace = instance.Namespace + o.Data = map[string][]byte{"cert.pem": []byte("LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNwVENDQWtxZ0F3SUJBZ0lSQU1FeVZVcDRMdlYydEFUREhlWklldDh3Q2dZSUtvWkl6ajBFQXdJd2daVXgKQ3pBSkJnTlZCQVlUQWxWVE1SY3dGUVlEVlFRSUV3NU9iM0owYUNCRFlYSnZiR2x1WVRFUE1BMEdBMVVFQnhNRwpSSFZ5YUdGdE1Rd3dDZ1lEVlFRS0V3TkpRazB4RXpBUkJnTlZCQXNUQ2tKc2IyTnJZMmhoYVc0eE9UQTNCZ05WCkJBTVRNR3BoYmpJeUxXOXlaR1Z5WlhKdmNtZGpZUzFqWVM1aGNIQnpMbkIxYldGekxtOXpMbVo1Y21VdWFXSnQKTG1OdmJUQWVGdzB5TURBeE1qSXhPREExTURCYUZ3MHpNREF4TVRreE9EQTFNREJhTUlHVk1Rc3dDUVlEVlFRRwpFd0pWVXpFWE1CVUdBMVVFQ0JNT1RtOXlkR2dnUTJGeWIyeHBibUV4RHpBTkJnTlZCQWNUQmtSMWNtaGhiVEVNCk1Bb0dBMVVFQ2hNRFNVSk5NUk13RVFZRFZRUUxFd3BDYkc5amEyTm9ZV2x1TVRrd053WURWUVFERXpCcVlXNHkKTWkxdmNtUmxjbVZ5YjNKblkyRXRZMkV1WVhCd2N5NXdkVzFoY3k1dmN5NW1lWEpsTG1saWJTNWpiMjB3V1RBVApCZ2NxaGtqT1BRSUJCZ2dxaGtqT1BRTUJCd05DQUFTR0lHUFkvZC9tQVhMejM4SlROR3F5bldpOTJXUVB6cnN0Cm5vdEFWZlh0dHZ5QWJXdTRNbWNUMEh6UnBTWjNDcGdxYUNXcTg1MUwyV09LcnZ6L0JPREpvM2t3ZHpCMUJnTlYKSFJFRWJqQnNnakJxWVc0eU1pMXZjbVJsY21WeWIzSm5ZMkV0WTJFdVlYQndjeTV3ZFcxaGN5NXZjeTVtZVhKbApMbWxpYlM1amIyMkNPR3BoYmpJeUxXOXlaR1Z5WlhKdmNtZGpZUzF2Y0dWeVlYUnBiMjV6TG1Gd2NITXVjSFZ0CllYTXViM011Wm5seVpTNXBZbTB1WTI5dE1Bb0dDQ3FHU000OUJBTUNBMGtBTUVZQ0lRQzM3Y1pkNFY2RThPQ1IKaDloQXEyK0dyR21FVTFQU0I1eHo5RkdEWThkODZRSWhBT1crM3Urb2d4bFNWNUoyR3ZYbHRaQmpXRkpvYnJxeApwVVQ4cW4yMDA1b0wKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo")} + case "tls-" + instance.Name + "-keystore": + o.Name = "tls-" + instance.Name + "-keystore" + o.Namespace = instance.Namespace + o.Data = map[string][]byte{"key.pem": []byte("LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNwVENDQWtxZ0F3SUJBZ0lSQU1FeVZVcDRMdlYydEFUREhlWklldDh3Q2dZSUtvWkl6ajBFQXdJd2daVXgKQ3pBSkJnTlZCQVlUQWxWVE1SY3dGUVlEVlFRSUV3NU9iM0owYUNCRFlYSnZiR2x1WVRFUE1BMEdBMVVFQnhNRwpSSFZ5YUdGdE1Rd3dDZ1lEVlFRS0V3TkpRazB4RXpBUkJnTlZCQXNUQ2tKc2IyTnJZMmhoYVc0eE9UQTNCZ05WCkJBTVRNR3BoYmpJeUxXOXlaR1Z5WlhKdmNtZGpZUzFqWVM1aGNIQnpMbkIxYldGekxtOXpMbVo1Y21VdWFXSnQKTG1OdmJUQWVGdzB5TURBeE1qSXhPREExTURCYUZ3MHpNREF4TVRreE9EQTFNREJhTUlHVk1Rc3dDUVlEVlFRRwpFd0pWVXpFWE1CVUdBMVVFQ0JNT1RtOXlkR2dnUTJGeWIyeHBibUV4RHpBTkJnTlZCQWNUQmtSMWNtaGhiVEVNCk1Bb0dBMVVFQ2hNRFNVSk5NUk13RVFZRFZRUUxFd3BDYkc5amEyTm9ZV2x1TVRrd053WURWUVFERXpCcVlXNHkKTWkxdmNtUmxjbVZ5YjNKblkyRXRZMkV1WVhCd2N5NXdkVzFoY3k1dmN5NW1lWEpsTG1saWJTNWpiMjB3V1RBVApCZ2NxaGtqT1BRSUJCZ2dxaGtqT1BRTUJCd05DQUFTR0lHUFkvZC9tQVhMejM4SlROR3F5bldpOTJXUVB6cnN0Cm5vdEFWZlh0dHZ5QWJXdTRNbWNUMEh6UnBTWjNDcGdxYUNXcTg1MUwyV09LcnZ6L0JPREpvM2t3ZHpCMUJnTlYKSFJFRWJqQnNnakJxWVc0eU1pMXZjbVJsY21WeWIzSm5ZMkV0WTJFdVlYQndjeTV3ZFcxaGN5NXZjeTVtZVhKbApMbWxpYlM1amIyMkNPR3BoYmpJeUxXOXlaR1Z5WlhKdmNtZGpZUzF2Y0dWeVlYUnBiMjV6TG1Gd2NITXVjSFZ0CllYTXViM011Wm5seVpTNXBZbTB1WTI5dE1Bb0dDQ3FHU000OUJBTUNBMGtBTUVZQ0lRQzM3Y1pkNFY2RThPQ1IKaDloQXEyK0dyR21FVTFQU0I1eHo5RkdEWThkODZRSWhBT1crM3Urb2d4bFNWNUoyR3ZYbHRaQmpXRkpvYnJxeApwVVQ4cW4yMDA1b0wKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo")} + case "tls-" + instance.Name + "-cacerts": + o.Name = "tls-" + instance.Name + "-cacerts" + o.Namespace = instance.Namespace + o.Data = map[string][]byte{"key.pem": 
[]byte("LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNwVENDQWtxZ0F3SUJBZ0lSQU1FeVZVcDRMdlYydEFUREhlWklldDh3Q2dZSUtvWkl6ajBFQXdJd2daVXgKQ3pBSkJnTlZCQVlUQWxWVE1SY3dGUVlEVlFRSUV3NU9iM0owYUNCRFlYSnZiR2x1WVRFUE1BMEdBMVVFQnhNRwpSSFZ5YUdGdE1Rd3dDZ1lEVlFRS0V3TkpRazB4RXpBUkJnTlZCQXNUQ2tKc2IyTnJZMmhoYVc0eE9UQTNCZ05WCkJBTVRNR3BoYmpJeUxXOXlaR1Z5WlhKdmNtZGpZUzFqWVM1aGNIQnpMbkIxYldGekxtOXpMbVo1Y21VdWFXSnQKTG1OdmJUQWVGdzB5TURBeE1qSXhPREExTURCYUZ3MHpNREF4TVRreE9EQTFNREJhTUlHVk1Rc3dDUVlEVlFRRwpFd0pWVXpFWE1CVUdBMVVFQ0JNT1RtOXlkR2dnUTJGeWIyeHBibUV4RHpBTkJnTlZCQWNUQmtSMWNtaGhiVEVNCk1Bb0dBMVVFQ2hNRFNVSk5NUk13RVFZRFZRUUxFd3BDYkc5amEyTm9ZV2x1TVRrd053WURWUVFERXpCcVlXNHkKTWkxdmNtUmxjbVZ5YjNKblkyRXRZMkV1WVhCd2N5NXdkVzFoY3k1dmN5NW1lWEpsTG1saWJTNWpiMjB3V1RBVApCZ2NxaGtqT1BRSUJCZ2dxaGtqT1BRTUJCd05DQUFTR0lHUFkvZC9tQVhMejM4SlROR3F5bldpOTJXUVB6cnN0Cm5vdEFWZlh0dHZ5QWJXdTRNbWNUMEh6UnBTWjNDcGdxYUNXcTg1MUwyV09LcnZ6L0JPREpvM2t3ZHpCMUJnTlYKSFJFRWJqQnNnakJxWVc0eU1pMXZjbVJsY21WeWIzSm5ZMkV0WTJFdVlYQndjeTV3ZFcxaGN5NXZjeTVtZVhKbApMbWxpYlM1amIyMkNPR3BoYmpJeUxXOXlaR1Z5WlhKdmNtZGpZUzF2Y0dWeVlYUnBiMjV6TG1Gd2NITXVjSFZ0CllYTXViM011Wm5seVpTNXBZbTB1WTI5dE1Bb0dDQ3FHU000OUJBTUNBMGtBTUVZQ0lRQzM3Y1pkNFY2RThPQ1IKaDloQXEyK0dyR21FVTFQU0I1eHo5RkdEWThkODZRSWhBT1crM3Urb2d4bFNWNUoyR3ZYbHRaQmpXRkpvYnJxeApwVVQ4cW4yMDA1b0wKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo")} + case "ecert-" + instance.Name + "-signcert": + o.Name = "ecert-" + instance.Name + "-signcert" + o.Namespace = instance.Namespace + o.Data = map[string][]byte{"cert.pem": []byte("LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNwVENDQWtxZ0F3SUJBZ0lSQU1FeVZVcDRMdlYydEFUREhlWklldDh3Q2dZSUtvWkl6ajBFQXdJd2daVXgKQ3pBSkJnTlZCQVlUQWxWVE1SY3dGUVlEVlFRSUV3NU9iM0owYUNCRFlYSnZiR2x1WVRFUE1BMEdBMVVFQnhNRwpSSFZ5YUdGdE1Rd3dDZ1lEVlFRS0V3TkpRazB4RXpBUkJnTlZCQXNUQ2tKc2IyTnJZMmhoYVc0eE9UQTNCZ05WCkJBTVRNR3BoYmpJeUxXOXlaR1Z5WlhKdmNtZGpZUzFqWVM1aGNIQnpMbkIxYldGekxtOXpMbVo1Y21VdWFXSnQKTG1OdmJUQWVGdzB5TURBeE1qSXhPREExTURCYUZ3MHpNREF4TVRreE9EQTFNREJhTUlHVk1Rc3dDUVlEVlFRRwpFd0pWVXpFWE1CVUdBMVVFQ0JNT1RtOXlkR2dnUTJGeWIyeHBibUV4RHpBTkJnTlZCQWNUQmtSMWNtaGhiVEVNCk1Bb0dBMVVFQ2hNRFNVSk5NUk13RVFZRFZRUUxFd3BDYkc5amEyTm9ZV2x1TVRrd053WURWUVFERXpCcVlXNHkKTWkxdmNtUmxjbVZ5YjNKblkyRXRZMkV1WVhCd2N5NXdkVzFoY3k1dmN5NW1lWEpsTG1saWJTNWpiMjB3V1RBVApCZ2NxaGtqT1BRSUJCZ2dxaGtqT1BRTUJCd05DQUFTR0lHUFkvZC9tQVhMejM4SlROR3F5bldpOTJXUVB6cnN0Cm5vdEFWZlh0dHZ5QWJXdTRNbWNUMEh6UnBTWjNDcGdxYUNXcTg1MUwyV09LcnZ6L0JPREpvM2t3ZHpCMUJnTlYKSFJFRWJqQnNnakJxWVc0eU1pMXZjbVJsY21WeWIzSm5ZMkV0WTJFdVlYQndjeTV3ZFcxaGN5NXZjeTVtZVhKbApMbWxpYlM1amIyMkNPR3BoYmpJeUxXOXlaR1Z5WlhKdmNtZGpZUzF2Y0dWeVlYUnBiMjV6TG1Gd2NITXVjSFZ0CllYTXViM011Wm5seVpTNXBZbTB1WTI5dE1Bb0dDQ3FHU000OUJBTUNBMGtBTUVZQ0lRQzM3Y1pkNFY2RThPQ1IKaDloQXEyK0dyR21FVTFQU0I1eHo5RkdEWThkODZRSWhBT1crM3Urb2d4bFNWNUoyR3ZYbHRaQmpXRkpvYnJxeApwVVQ4cW4yMDA1b0wKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo")} + case "ecert-" + instance.Name + "-cacerts": + o.Name = "ecert-" + instance.Name + "-cacerts" + o.Namespace = instance.Namespace + o.Data = map[string][]byte{"cacert-0.pem": 
[]byte("LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNwVENDQWtxZ0F3SUJBZ0lSQU1FeVZVcDRMdlYydEFUREhlWklldDh3Q2dZSUtvWkl6ajBFQXdJd2daVXgKQ3pBSkJnTlZCQVlUQWxWVE1SY3dGUVlEVlFRSUV3NU9iM0owYUNCRFlYSnZiR2x1WVRFUE1BMEdBMVVFQnhNRwpSSFZ5YUdGdE1Rd3dDZ1lEVlFRS0V3TkpRazB4RXpBUkJnTlZCQXNUQ2tKc2IyTnJZMmhoYVc0eE9UQTNCZ05WCkJBTVRNR3BoYmpJeUxXOXlaR1Z5WlhKdmNtZGpZUzFqWVM1aGNIQnpMbkIxYldGekxtOXpMbVo1Y21VdWFXSnQKTG1OdmJUQWVGdzB5TURBeE1qSXhPREExTURCYUZ3MHpNREF4TVRreE9EQTFNREJhTUlHVk1Rc3dDUVlEVlFRRwpFd0pWVXpFWE1CVUdBMVVFQ0JNT1RtOXlkR2dnUTJGeWIyeHBibUV4RHpBTkJnTlZCQWNUQmtSMWNtaGhiVEVNCk1Bb0dBMVVFQ2hNRFNVSk5NUk13RVFZRFZRUUxFd3BDYkc5amEyTm9ZV2x1TVRrd053WURWUVFERXpCcVlXNHkKTWkxdmNtUmxjbVZ5YjNKblkyRXRZMkV1WVhCd2N5NXdkVzFoY3k1dmN5NW1lWEpsTG1saWJTNWpiMjB3V1RBVApCZ2NxaGtqT1BRSUJCZ2dxaGtqT1BRTUJCd05DQUFTR0lHUFkvZC9tQVhMejM4SlROR3F5bldpOTJXUVB6cnN0Cm5vdEFWZlh0dHZ5QWJXdTRNbWNUMEh6UnBTWjNDcGdxYUNXcTg1MUwyV09LcnZ6L0JPREpvM2t3ZHpCMUJnTlYKSFJFRWJqQnNnakJxWVc0eU1pMXZjbVJsY21WeWIzSm5ZMkV0WTJFdVlYQndjeTV3ZFcxaGN5NXZjeTVtZVhKbApMbWxpYlM1amIyMkNPR3BoYmpJeUxXOXlaR1Z5WlhKdmNtZGpZUzF2Y0dWeVlYUnBiMjV6TG1Gd2NITXVjSFZ0CllYTXViM011Wm5seVpTNXBZbTB1WTI5dE1Bb0dDQ3FHU000OUJBTUNBMGtBTUVZQ0lRQzM3Y1pkNFY2RThPQ1IKaDloQXEyK0dyR21FVTFQU0I1eHo5RkdEWThkODZRSWhBT1crM3Urb2d4bFNWNUoyR3ZYbHRaQmpXRkpvYnJxeApwVVQ4cW4yMDA1b0wKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo")} + } + } + return nil + } + instance.Status.Version = version.Operator + + deploymentMgr = &peermocks.DeploymentManager{} + serviceMgr = &managermocks.ResourceManager{} + pvcMgr = &managermocks.ResourceManager{} + couchPvcMgr = &managermocks.ResourceManager{} + configMapMgr = &managermocks.ResourceManager{} + roleMgr = &managermocks.ResourceManager{} + roleBindingMgr = &managermocks.ResourceManager{} + serviceAccountMgr = &managermocks.ResourceManager{} + + scheme := &runtime.Scheme{} + cfg = &config.Config{ + PeerInitConfig: &peerinit.Config{ + OUFile: "../../../../defaultconfig/peer/ouconfig.yaml", + CorePeerFile: "../../../../defaultconfig/peer/core.yaml", + }, + Operator: config.Operator{ + Versions: &deployer.Versions{ + Peer: map[string]deployer.VersionPeer{ + "1.4.9-0": { + Default: true, + Image: deployer.PeerImages{ + PeerImage: "peerimage", + PeerTag: "1.4.9", + PeerInitImage: "peerinitimage", + PeerInitTag: "1.4.9", + }, + }, + }, + }, + }, + } + initializer = &peermocks.InitializeIBPPeer{} + initializer.GetInitPeerReturns(&peerinit.Peer{}, nil) + + certificateMgr = &peermocks.CertificateManager{} + restartMgr := &peermocks.RestartManager{} + peer = &basepeer.Peer{ + Client: mockKubeClient, + Scheme: scheme, + Config: cfg, + + DeploymentManager: deploymentMgr, + ServiceManager: serviceMgr, + PVCManager: pvcMgr, + StateDBPVCManager: couchPvcMgr, + FluentDConfigMapManager: configMapMgr, + RoleManager: roleMgr, + RoleBindingManager: roleBindingMgr, + ServiceAccountManager: serviceAccountMgr, + Initializer: initializer, + + CertificateManager: certificateMgr, + RenewCertTimers: make(map[string]*time.Timer), + + Restart: restartMgr, + } + }) + + Context("pre reconcile checks", func() { + Context("version and images", func() { + Context("create CR", func() { + It("returns an error if fabric version is not set in spec", func() { + instance.Spec.FabricVersion = "" + _, err := peer.PreReconcileChecks(instance, update) + Expect(err).To(MatchError(ContainSubstring("fabric version is not set"))) + }) + + Context("images section blank", func() { + BeforeEach(func() { + instance.Spec.Images = nil + }) + + It("normalizes fabric version and requests a requeue", func() { + instance.Spec.FabricVersion = 
"1.4.9" + requeue, err := peer.PreReconcileChecks(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(requeue).To(Equal(true)) + Expect(instance.Spec.FabricVersion).To(Equal("1.4.9-0")) + }) + + It("returns an error if fabric version not supported", func() { + instance.Spec.FabricVersion = "0.0.1" + _, err := peer.PreReconcileChecks(instance, update) + Expect(err).To(MatchError(ContainSubstring("fabric version '0.0.1' is not supported"))) + }) + + When("version is passed without hyphen", func() { + BeforeEach(func() { + instance.Spec.FabricVersion = "1.4.9" + }) + + It("finds default version for release and updates images section", func() { + requeue, err := peer.PreReconcileChecks(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(requeue).To(Equal(true)) + Expect(*instance.Spec.Images).To(Equal(current.PeerImages{ + PeerImage: "peerimage", + PeerTag: "1.4.9", + PeerInitImage: "peerinitimage", + PeerInitTag: "1.4.9", + })) + }) + }) + + When("version is passed with hyphen", func() { + BeforeEach(func() { + instance.Spec.FabricVersion = "1.4.9-0" + }) + + It("looks images and updates images section", func() { + requeue, err := peer.PreReconcileChecks(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(requeue).To(Equal(true)) + Expect(*instance.Spec.Images).To(Equal(current.PeerImages{ + PeerImage: "peerimage", + PeerTag: "1.4.9", + PeerInitImage: "peerinitimage", + PeerInitTag: "1.4.9", + })) + }) + }) + }) + + Context("images section passed", func() { + BeforeEach(func() { + instance.Spec.Images = ¤t.PeerImages{ + PeerImage: "ghcr.io/ibm-blockchain/peerimage", + PeerTag: "2.0.0", + PeerInitImage: "ghcr.io/ibm-blockchain/peerinitimage", + PeerInitTag: "2.0.0", + } + }) + + When("version is not passed", func() { + BeforeEach(func() { + instance.Spec.FabricVersion = "" + }) + + It("returns an error", func() { + _, err := peer.PreReconcileChecks(instance, update) + Expect(err).To(MatchError(ContainSubstring("fabric version is not set"))) + }) + }) + + When("version is passed", func() { + BeforeEach(func() { + instance.Spec.FabricVersion = "2.0.0-8" + }) + + It("persists current spec configuration", func() { + requeue, err := peer.PreReconcileChecks(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(requeue).To(Equal(false)) + Expect(instance.Spec.FabricVersion).To(Equal("2.0.0-8")) + Expect(*instance.Spec.Images).To(Equal(current.PeerImages{ + PeerImage: "ghcr.io/ibm-blockchain/peerimage", + PeerTag: "2.0.0", + PeerInitImage: "ghcr.io/ibm-blockchain/peerinitimage", + PeerInitTag: "2.0.0", + })) + }) + }) + }) + }) + + Context("update CR", func() { + BeforeEach(func() { + instance.Spec.FabricVersion = "2.0.1-0" + instance.Spec.Images = ¤t.PeerImages{ + PeerImage: "ghcr.io/ibm-blockchain/peerimage", + PeerTag: "2.0.1", + PeerInitImage: "ghcr.io/ibm-blockchain/peerinitimage", + PeerInitTag: "2.0.1", + } + }) + + When("images updated", func() { + BeforeEach(func() { + update.ImagesUpdatedReturns(true) + instance.Spec.Images = ¤t.PeerImages{ + PeerImage: "ghcr.io/ibm-blockchain/peerimage", + PeerTag: "2.0.8", + PeerInitImage: "ghcr.io/ibm-blockchain/peerinitimage", + PeerInitTag: "2.0.8", + } + }) + + Context("and version updated", func() { + BeforeEach(func() { + update.FabricVersionUpdatedReturns(true) + instance.Spec.FabricVersion = "2.0.1-8" + }) + + It("persists current spec configuration", func() { + requeue, err := peer.PreReconcileChecks(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(requeue).To(Equal(false)) + 
Expect(instance.Spec.FabricVersion).To(Equal("2.0.1-8")) + Expect(*instance.Spec.Images).To(Equal(current.PeerImages{ + PeerImage: "ghcr.io/ibm-blockchain/peerimage", + PeerTag: "2.0.8", + PeerInitImage: "ghcr.io/ibm-blockchain/peerinitimage", + PeerInitTag: "2.0.8", + })) + }) + }) + + Context("and version not updated", func() { + It("persists current spec configuration", func() { + requeue, err := peer.PreReconcileChecks(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(requeue).To(Equal(false)) + Expect(instance.Spec.FabricVersion).To(Equal("2.0.1-0")) + Expect(*instance.Spec.Images).To(Equal(current.PeerImages{ + PeerImage: "ghcr.io/ibm-blockchain/peerimage", + PeerTag: "2.0.8", + PeerInitImage: "ghcr.io/ibm-blockchain/peerinitimage", + PeerInitTag: "2.0.8", + })) + }) + }) + }) + + When("images not updated", func() { + Context("and version updated during operator migration", func() { + BeforeEach(func() { + update.FabricVersionUpdatedReturns(true) + instance.Spec.FabricVersion = "unsupported" + }) + + It("persists current spec configuration", func() { + requeue, err := peer.PreReconcileChecks(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(requeue).To(Equal(false)) + Expect(instance.Spec.FabricVersion).To(Equal("unsupported")) + Expect(*instance.Spec.Images).To(Equal(current.PeerImages{ + PeerImage: "ghcr.io/ibm-blockchain/peerimage", + PeerTag: "2.0.1", + PeerInitImage: "ghcr.io/ibm-blockchain/peerinitimage", + PeerInitTag: "2.0.1", + })) + }) + }) + + Context("and version updated (not during operator migration)", func() { + BeforeEach(func() { + update.FabricVersionUpdatedReturns(true) + }) + + When("using non-hyphenated version", func() { + BeforeEach(func() { + instance.Spec.FabricVersion = "1.4.9" + }) + + It("looks images and updates images section", func() { + requeue, err := peer.PreReconcileChecks(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(requeue).To(Equal(true)) + Expect(instance.Spec.FabricVersion).To(Equal("1.4.9-0")) + Expect(*instance.Spec.Images).To(Equal(current.PeerImages{ + PeerImage: "peerimage", + PeerTag: "1.4.9", + PeerInitImage: "peerinitimage", + PeerInitTag: "1.4.9", + })) + }) + }) + + When("using hyphenated version", func() { + BeforeEach(func() { + instance.Spec.FabricVersion = "1.4.9-0" + }) + + It("looks images and updates images section", func() { + instance.Spec.RegistryURL = "test.cr" + requeue, err := peer.PreReconcileChecks(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(requeue).To(Equal(true)) + Expect(instance.Spec.FabricVersion).To(Equal("1.4.9-0")) + Expect(*instance.Spec.Images).To(Equal(current.PeerImages{ + PeerImage: "test.cr/peerimage", + PeerTag: "1.4.9", + PeerInitImage: "test.cr/peerinitimage", + PeerInitTag: "1.4.9", + })) + }) + }) + }) + }) + }) + }) + }) + + Context("Reconciles", func() { + It("returns nil and requeues request if instance version updated", func() { + instance.Status.Version = "" + _, err := peer.Reconcile(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(mockKubeClient.PatchStatusCallCount()).To(Equal(1)) + }) + It("returns a breaking error if initialization fails", func() { + cfg.PeerInitConfig.CorePeerFile = "../../../../../defaultconfig/peer/badfile.yaml" + peer.Initializer = peerinit.New(cfg.PeerInitConfig, nil, nil, nil, nil, enroller.HSMEnrollJobTimeouts{}) + _, err := peer.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("Code: 22 - failed to initialize peer: open")) + 
Expect(operatorerrors.IsBreakingError(err, "msg", nil)).NotTo(HaveOccurred()) + }) + + It("returns an error for invalid HSM endpoint", func() { + instance.Spec.HSM.PKCS11Endpoint = "tcp://:2347" + _, err := peer.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(fmt.Sprintf("failed pre reconcile checks: invalid HSM endpoint for peer instance '%s': missing IP address", instance.Name))) + }) + + It("returns an error domain is not set", func() { + instance.Spec.Domain = "" + _, err := peer.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(fmt.Sprintf("failed pre reconcile checks: domain not set for peer instance '%s'", instance.Name))) + }) + + It("returns an error if both enroll and reenroll action for ecert set to true", func() { + instance.Spec.Action.Enroll.Ecert = true + instance.Spec.Action.Reenroll.Ecert = true + _, err := peer.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed pre reconcile checks: both enroll and renenroll action requested for ecert, must only select one")) + }) + + It("returns an error if both enroll and reenroll action for TLS cert set to true", func() { + instance.Spec.Action.Enroll.TLSCert = true + instance.Spec.Action.Reenroll.TLSCert = true + _, err := peer.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed pre reconcile checks: both enroll and renenroll action requested for TLS cert, must only select one")) + }) + + It("returns an error if pvc manager fails to reconcile", func() { + pvcMgr.ReconcileReturns(errors.New("failed to reconcile pvc")) + _, err := peer.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed PVC reconciliation: failed to reconcile pvc")) + }) + + It("returns an error if couch pvc manager fails to reconcile", func() { + couchPvcMgr.ReconcileReturns(errors.New("failed to reconcile couch pvc")) + _, err := peer.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed CouchDB PVC reconciliation: failed to reconcile couch pvc")) + }) + + It("returns an error if service manager fails to reconcile", func() { + serviceMgr.ReconcileReturns(errors.New("failed to reconcile service")) + _, err := peer.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed Service reconciliation: failed to reconcile service")) + }) + + It("returns an error if deployment manager fails to reconcile", func() { + deploymentMgr.ReconcileReturns(errors.New("failed to reconcile deployment")) + _, err := peer.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed Deployment reconciliation: failed to reconcile deployment")) + }) + + It("returns an error if role manager fails to reconcile", func() { + roleMgr.ReconcileReturns(errors.New("failed to reconcile role")) + _, err := peer.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to reconcile role")) + }) + + It("returns an error if role binding manager fails to reconcile", func() { + roleBindingMgr.ReconcileReturns(errors.New("failed to reconcile role binding")) + _, err := peer.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to 
reconcile role binding")) + }) + + It("returns an error if service account binding manager fails to reconcile", func() { + serviceAccountMgr.ReconcileReturns(errors.New("failed to reconcile service account")) + _, err := peer.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to reconcile service account")) + }) + + It("returns an error if config map manager fails to reconcile", func() { + configMapMgr.ReconcileReturns(errors.New("failed to reconcile config map")) + _, err := peer.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed FluentD ConfigMap reconciliation: failed to reconcile config map")) + }) + + It("does not return an error on a successful reconcile", func() { + _, err := peer.Reconcile(instance, update) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("secret", func() { + It("does not try to create secret if the get request returns an error other than 'not found'", func() { + errMsg := "connection refused" + mockKubeClient.GetReturns(errors.New(errMsg)) + err := peer.ReconcileSecret(instance) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(errMsg)) + }) + + When("secret does not exist", func() { + BeforeEach(func() { + notFoundErr := &k8serror.StatusError{ + ErrStatus: metav1.Status{ + Reason: metav1.StatusReasonNotFound, + }, + } + mockKubeClient.GetReturns(notFoundErr) + }) + + It("returns an error if the creation of the Secret fails", func() { + errMsg := "unable to create secret" + mockKubeClient.CreateReturns(errors.New(errMsg)) + err := peer.ReconcileSecret(instance) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(errMsg)) + }) + + It("does not return an error on a successfull secret creation", func() { + err := peer.ReconcileSecret(instance) + Expect(err).NotTo(HaveOccurred()) + }) + }) + }) + + Context("check csr hosts", func() { + It("adds csr hosts if not present", func() { + instance = ¤t.IBPPeer{ + Spec: current.IBPPeerSpec{ + Secret: ¤t.SecretSpec{ + Enrollment: ¤t.EnrollmentSpec{}, + }, + }, + } + hosts := []string{"test.com", "127.0.0.1"} + peer.CheckCSRHosts(instance, hosts) + Expect(instance.Spec.Secret.Enrollment.TLS).NotTo(BeNil()) + Expect(instance.Spec.Secret.Enrollment.TLS.CSR).NotTo(BeNil()) + Expect(instance.Spec.Secret.Enrollment.TLS.CSR.Hosts).To(Equal(hosts)) + }) + + It("appends csr hosts if passed", func() { + hostsCustom := []string{"custom.domain.com"} + hosts := []string{"test.com", "127.0.0.1"} + instance = ¤t.IBPPeer{ + Spec: current.IBPPeerSpec{ + Secret: ¤t.SecretSpec{ + Enrollment: ¤t.EnrollmentSpec{ + TLS: ¤t.Enrollment{ + CSR: ¤t.CSR{ + Hosts: hostsCustom, + }, + }, + }, + }, + }, + } + peer.CheckCSRHosts(instance, hosts) + Expect(instance.Spec.Secret.Enrollment.TLS).NotTo(BeNil()) + Expect(instance.Spec.Secret.Enrollment.TLS.CSR).NotTo(BeNil()) + Expect(instance.Spec.Secret.Enrollment.TLS.CSR.Hosts).To(ContainElement(hostsCustom[0])) + Expect(instance.Spec.Secret.Enrollment.TLS.CSR.Hosts).To(ContainElement(hosts[0])) + Expect(instance.Spec.Secret.Enrollment.TLS.CSR.Hosts).To(ContainElement(hosts[1])) + }) + }) + + Context("fabric peer migration", func() { + BeforeEach(func() { + overrides := &pconfig.Core{ + Core: v1.Core{ + Peer: v1.Peer{ + BCCSP: &commonapi.BCCSP{ + ProviderName: "pkcs11", + PKCS11: &commonapi.PKCS11Opts{ + FileKeyStore: &commonapi.FileKeyStoreOpts{ + KeyStorePath: "msp/keystore", + }, + }, + }, + }, + }, + } + jmRaw, err := 
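The CheckCSRHosts expectations above describe a simple merge: make sure the TLS enrollment CSR section exists, keep any hosts the user already supplied, and append the generated hosts that are missing. A standalone sketch of that merge with plain types in place of the operator's CRD structs:

package main

import "fmt"

// csr stands in for the CSR section of a TLS enrollment spec.
type csr struct {
	Hosts []string
}

// mergeHosts appends each host that is not already listed, preserving any
// custom hosts that were supplied first.
func mergeHosts(c *csr, hosts []string) {
	seen := map[string]bool{}
	for _, h := range c.Hosts {
		seen[h] = true
	}
	for _, h := range hosts {
		if !seen[h] {
			c.Hosts = append(c.Hosts, h)
			seen[h] = true
		}
	}
}

func main() {
	c := &csr{Hosts: []string{"custom.domain.com"}}
	mergeHosts(c, []string{"test.com", "127.0.0.1"})
	fmt.Println(c.Hosts) // [custom.domain.com test.com 127.0.0.1]
}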
util.ConvertToJsonMessage(overrides) + Expect(err).NotTo(HaveOccurred()) + + instance.Spec.ConfigOverride = &runtime.RawExtension{Raw: *jmRaw} + + coreBytes, err := yaml.Marshal(overrides) + Expect(err).NotTo(HaveOccurred()) + + cm := &corev1.ConfigMap{ + BinaryData: map[string][]byte{ + "core.yaml": coreBytes, + }, + } + + mockKubeClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj client.Object) error { + switch obj.(type) { + case *corev1.ConfigMap: + o := obj.(*corev1.ConfigMap) + o.Name = "core-config" + o.BinaryData = cm.BinaryData + } + return nil + } + }) + + It("removes keystore path value", func() { + peerConfig, err := peer.FabricPeerMigrationV1_4(instance) + Expect(err).NotTo(HaveOccurred()) + Expect(peerConfig.Peer.BCCSP.PKCS11.FileKeyStore).To(BeNil()) + }) + + When("fabric peer tag is less than 1.4.7", func() { + BeforeEach(func() { + instance.Spec.Images.PeerTag = "1.4.6-20200611" + }) + + It("returns without updating config", func() { + peerConfig, err := peer.FabricPeerMigrationV1_4(instance) + Expect(err).NotTo(HaveOccurred()) + Expect(peerConfig).To(BeNil()) + }) + }) + + When("hsm is not enabled", func() { + BeforeEach(func() { + overrides := &pconfig.Core{ + Core: v1.Core{ + Peer: v1.Peer{ + BCCSP: &commonapi.BCCSP{ + ProviderName: "sw", + SW: &commonapi.SwOpts{ + FileKeyStore: commonapi.FileKeyStoreOpts{ + KeyStorePath: "msp/keystore", + }, + }, + }, + }, + }, + } + jmRaw, err := util.ConvertToJsonMessage(overrides) + Expect(err).NotTo(HaveOccurred()) + + instance.Spec.ConfigOverride = &runtime.RawExtension{Raw: *jmRaw} + }) + + It("returns without updating config", func() { + peerConfig, err := peer.FabricPeerMigrationV1_4(instance) + Expect(err).NotTo(HaveOccurred()) + Expect(peerConfig).To(BeNil()) + }) + }) + }) + + Context("images override", func() { + var images *current.PeerImages + + Context("using registry url", func() { + BeforeEach(func() { + images = ¤t.PeerImages{ + PeerInitImage: "peerinitimage", + PeerInitTag: "2.0.0", + PeerImage: "peerimage", + PeerTag: "2.0.0", + DindImage: "dindimage", + DindTag: "2.0.0", + CouchDBImage: "couchimage", + CouchDBTag: "2.0.0", + GRPCWebImage: "grpcimage", + GRPCWebTag: "2.0.0", + FluentdImage: "fluentdimage", + FluentdTag: "2.0.0", + } + }) + + It("overrides images based with registry url and does not append more value on each call", func() { + images.Override(images, "ghcr.io/ibm-blockchain/", "amd64") + Expect(images.PeerInitImage).To(Equal("ghcr.io/ibm-blockchain/peerinitimage")) + Expect(images.PeerInitTag).To(Equal("2.0.0")) + Expect(images.PeerImage).To(Equal("ghcr.io/ibm-blockchain/peerimage")) + Expect(images.PeerTag).To(Equal("2.0.0")) + Expect(images.DindImage).To(Equal("ghcr.io/ibm-blockchain/dindimage")) + Expect(images.DindTag).To(Equal("2.0.0")) + Expect(images.CouchDBImage).To(Equal("ghcr.io/ibm-blockchain/couchimage")) + Expect(images.CouchDBTag).To(Equal("2.0.0")) + Expect(images.GRPCWebImage).To(Equal("ghcr.io/ibm-blockchain/grpcimage")) + Expect(images.GRPCWebTag).To(Equal("2.0.0")) + Expect(images.FluentdImage).To(Equal("ghcr.io/ibm-blockchain/fluentdimage")) + Expect(images.FluentdTag).To(Equal("2.0.0")) + }) + + It("overrides images based with registry url and does not append more value on each call", func() { + images.Override(images, "ghcr.io/ibm-blockchain/images/", "s390") + Expect(images.PeerInitImage).To(Equal("ghcr.io/ibm-blockchain/images/peerinitimage")) + Expect(images.PeerInitTag).To(Equal("2.0.0")) + 
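The override assertions above and below pin down two properties: a registry URL is prepended to bare image names, and repeated calls must not keep prepending. One way to get both properties is to treat an image that already contains a registry or repository path as fully qualified; this is an assumption about the approach, not the operator's actual Override code:

package main

import (
	"fmt"
	"strings"
)

// overrideImage prefixes image with registryURL unless the image already
// carries a registry or repository path (it contains a "/"), which makes
// repeated calls no-ops.
func overrideImage(image, registryURL string) string {
	if registryURL == "" || strings.Contains(image, "/") {
		return image
	}
	return registryURL + image
}

func main() {
	fmt.Println(overrideImage("peerimage", "ghcr.io/ibm-blockchain/"))         // ghcr.io/ibm-blockchain/peerimage
	fmt.Println(overrideImage("ghcr.io/ibm-blockchain/peerimage", "test.cr/")) // unchanged: already qualified
}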
Expect(images.PeerImage).To(Equal("ghcr.io/ibm-blockchain/images/peerimage")) + Expect(images.PeerTag).To(Equal("2.0.0")) + Expect(images.DindImage).To(Equal("ghcr.io/ibm-blockchain/images/dindimage")) + Expect(images.DindTag).To(Equal("2.0.0")) + Expect(images.CouchDBImage).To(Equal("ghcr.io/ibm-blockchain/images/couchimage")) + Expect(images.CouchDBTag).To(Equal("2.0.0")) + Expect(images.GRPCWebImage).To(Equal("ghcr.io/ibm-blockchain/images/grpcimage")) + Expect(images.GRPCWebTag).To(Equal("2.0.0")) + Expect(images.FluentdImage).To(Equal("ghcr.io/ibm-blockchain/images/fluentdimage")) + Expect(images.FluentdTag).To(Equal("2.0.0")) + }) + }) + + Context("using fully qualified path", func() { + BeforeEach(func() { + images = ¤t.PeerImages{ + PeerInitImage: "ghcr.io/ibm-blockchain/peerinitimage", + PeerInitTag: "2.0.0", + PeerImage: "ghcr.io/ibm-blockchain/peerimage", + PeerTag: "2.0.0", + DindImage: "ghcr.io/ibm-blockchain/dindimage", + DindTag: "2.0.0", + CouchDBImage: "ghcr.io/ibm-blockchain/couchimage", + CouchDBTag: "2.0.0", + GRPCWebImage: "ghcr.io/ibm-blockchain/grpcimage", + GRPCWebTag: "2.0.0", + FluentdImage: "ghcr.io/ibm-blockchain/fluentdimage", + FluentdTag: "2.0.0", + } + }) + + It("keeps images and adds arch to tag", func() { + images.Override(images, "", "amd64") + Expect(images.PeerInitImage).To(Equal("ghcr.io/ibm-blockchain/peerinitimage")) + Expect(images.PeerInitTag).To(Equal("2.0.0")) + Expect(images.PeerImage).To(Equal("ghcr.io/ibm-blockchain/peerimage")) + Expect(images.PeerTag).To(Equal("2.0.0")) + Expect(images.DindImage).To(Equal("ghcr.io/ibm-blockchain/dindimage")) + Expect(images.DindTag).To(Equal("2.0.0")) + Expect(images.CouchDBImage).To(Equal("ghcr.io/ibm-blockchain/couchimage")) + Expect(images.CouchDBTag).To(Equal("2.0.0")) + Expect(images.GRPCWebImage).To(Equal("ghcr.io/ibm-blockchain/grpcimage")) + Expect(images.GRPCWebTag).To(Equal("2.0.0")) + Expect(images.FluentdImage).To(Equal("ghcr.io/ibm-blockchain/fluentdimage")) + Expect(images.FluentdTag).To(Equal("2.0.0")) + }) + }) + }) + + Context("update connection profile", func() { + It("returns error if fails to get cert", func() { + mockKubeClient.GetReturns(errors.New("get error")) + err := peer.UpdateConnectionProfile(instance) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("get error")) + }) + + It("updates connection profile cm", func() { + err := peer.UpdateConnectionProfile(instance) + Expect(err).NotTo(HaveOccurred()) + Expect(mockKubeClient.GetCallCount()).To(Equal(7)) + }) + }) + + Context("update msp certificates", func() { + const testcert = 
"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNpVENDQWkrZ0F3SUJBZ0lVRkd3N0RjK0QvZUoyY08wOHd6d2tialIzK1M4d0NnWUlLb1pJemowRUF3SXcKYURFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJrd0Z3WURWUVFERXhCbVlXSnlhV010ClkyRXRjMlZ5ZG1WeU1CNFhEVEU1TVRBd09URTBNakF3TUZvWERUSXdNVEF3T0RFME1qQXdNRm93YnpFTE1Ba0cKQTFVRUJoTUNWVk14RnpBVkJnTlZCQWdURGs1dmNuUm9JRU5oY205c2FXNWhNUlF3RWdZRFZRUUtFd3RJZVhCbApjbXhsWkdkbGNqRVBNQTBHQTFVRUN4TUdSbUZpY21sak1TQXdIZ1lEVlFRREV4ZFRZV0ZrY3kxTllXTkNiMjlyCkxWQnlieTVzYjJOaGJEQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJBK0JBRzhZakJvTllabGgKRjFrVHNUbHd6VERDQTJocDhZTXI5Ky8vbEd0NURoSGZVT1c3bkhuSW1USHlPRjJQVjFPcVRuUWhUbWpLYTdaQwpqeU9BUWxLamdhOHdnYXd3RGdZRFZSMFBBUUgvQkFRREFnT29NQjBHQTFVZEpRUVdNQlFHQ0NzR0FRVUZCd01CCkJnZ3JCZ0VGQlFjREFqQU1CZ05WSFJNQkFmOEVBakFBTUIwR0ExVWREZ1FXQkJTbHJjL0lNQkxvMzR0UktvWnEKNTQreDIyYWEyREFmQmdOVkhTTUVHREFXZ0JSWmpxT3RQZWJzSFI2UjBNQUhrNnd4ei85UFZqQXRCZ05WSFJFRQpKakFrZ2hkVFlXRmtjeTFOWVdOQ2IyOXJMVkJ5Ynk1c2IyTmhiSUlKYkc5allXeG9iM04wTUFvR0NDcUdTTTQ5CkJBTUNBMGdBTUVVQ0lRRGR0Y1QwUE9FQXJZKzgwdEhmWUwvcXBiWWoxMGU2eWlPWlpUQ29wY25mUVFJZ1FNQUQKaFc3T0NSUERNd3lqKzNhb015d2hFenFHYy9jRDJSU2V5ekRiRjFFPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==" + + BeforeEach(func() { + msp := ¤t.MSP{ + SignCerts: testcert, + CACerts: []string{testcert}, + KeyStore: "keystore", + } + + initializer.GetUpdatedPeerReturns(&peerinit.Peer{ + Cryptos: &commonconfig.Cryptos{ + TLS: &mspparser.MSPParser{ + Config: msp, + }, + }, + }, nil) + + }) + + It("returns error if fails to get update msp parsers", func() { + initializer.GetUpdatedPeerReturns(nil, errors.New("get error")) + err := peer.UpdateMSPCertificates(instance) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("get error")) + }) + + It("returns error if fails to generate crypto", func() { + initializer.GetUpdatedPeerReturns(&peerinit.Peer{ + Cryptos: &commonconfig.Cryptos{ + TLS: &mspparser.MSPParser{ + Config: ¤t.MSP{ + SignCerts: "invalid", + }, + }, + }, + }, nil) + err := peer.UpdateMSPCertificates(instance) + Expect(err).To(HaveOccurred()) + }) + + It("returns error if fails to update secrets", func() { + initializer.UpdateSecretsFromResponseReturns(errors.New("update error")) + err := peer.UpdateMSPCertificates(instance) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("update error")) + }) + + It("updates secrets of certificates passed through MSP spec", func() { + err := peer.UpdateMSPCertificates(instance) + Expect(err).NotTo(HaveOccurred()) + Expect(initializer.UpdateSecretsFromResponseCallCount()).To(Equal(1)) + }) + }) + + Context("enroll for ecert", func() { + It("returns error if no enrollment information provided", func() { + err := peer.EnrollForEcert(instance) + Expect(err).To(HaveOccurred()) + Expect(err).To(MatchError(ContainSubstring("unable to enroll, no ecert enrollment information provided"))) + }) + + It("returns error if enrollment with ca fails", func() { + instance.Spec.Secret = ¤t.SecretSpec{ + Enrollment: ¤t.EnrollmentSpec{ + Component: ¤t.Enrollment{}, + }, + } + err := peer.EnrollForEcert(instance) + Expect(err).To(HaveOccurred()) + Expect(err).To(MatchError(ContainSubstring("failed to enroll for ecert"))) + }) + }) + + Context("enroll for TLS cert", func() { + It("returns error if no enrollment information provided", func() { + err := peer.EnrollForTLSCert(instance) + Expect(err).To(HaveOccurred()) + Expect(err).To(MatchError(ContainSubstring("unable to enroll, no TLS enrollment 
information provided"))) + }) + + It("returns error if enrollment with ca fails", func() { + instance.Spec.Secret = ¤t.SecretSpec{ + Enrollment: ¤t.EnrollmentSpec{ + TLS: ¤t.Enrollment{}, + }, + } + err := peer.EnrollForTLSCert(instance) + Expect(err).To(HaveOccurred()) + Expect(err).To(MatchError(ContainSubstring("failed to enroll for TLS cert"))) + }) + }) +}) + +func generateCertPemBytes(daysUntilExpired int) []byte { + certtemplate := x509.Certificate{ + SerialNumber: big.NewInt(1), + NotBefore: time.Now(), + NotAfter: time.Now().Add(time.Duration(daysUntilExpired) * time.Hour * 24), + } + + priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + Expect(err).NotTo(HaveOccurred()) + + cert, err := x509.CreateCertificate(rand.Reader, &certtemplate, &certtemplate, &priv.PublicKey, priv) + Expect(err).NotTo(HaveOccurred()) + + block := &pem.Block{ + Type: "CERTIFICATE", + Bytes: cert, + } + + return pem.EncodeToMemory(block) +} diff --git a/pkg/offering/common/backupcrypto.go b/pkg/offering/common/backupcrypto.go new file mode 100644 index 00000000..f85e8d7b --- /dev/null +++ b/pkg/offering/common/backupcrypto.go @@ -0,0 +1,367 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
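generateCertPemBytes builds short-lived self-signed certificates so expiry-driven paths (for example the RenewCertTimers map in the suite setup) can be exercised. The complementary piece is measuring how long a PEM certificate has left; a small standard-library sketch, not tied to the operator's renewal logic:

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"time"
)

// timeUntilExpiry parses a PEM certificate and reports how long remains
// before NotAfter; a renewal timer would typically fire some margin earlier.
func timeUntilExpiry(pemBytes []byte) (time.Duration, error) {
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		return 0, fmt.Errorf("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return 0, err
	}
	return time.Until(cert.NotAfter), nil
}

func main() {
	// A cert produced with 30 days of validity would report roughly 720h here;
	// empty input simply errors out.
	fmt.Println(timeUntilExpiry(nil))
}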
+ */ + +package common + +import ( + "context" + "encoding/json" + "fmt" + "time" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common" + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var log = logf.Log.WithName("backup_crypto") + +// Number of iterations we are storing +const ITERATIONS = 10 + +type Backup struct { + List []*current.MSP `json:"list"` + Timestamp string `json:"timestamp"` +} + +type Crypto struct { + TLS *current.MSP + Ecert *current.MSP + Operations *current.MSP + CA *current.MSP +} + +func BackupCrypto(client k8sclient.Client, scheme *runtime.Scheme, instance v1.Object, labels map[string]string) error { + tlsCrypto := GetCrypto("tls", client, instance) + ecertCrypto := GetCrypto("ecert", client, instance) + + if tlsCrypto == nil && ecertCrypto == nil { + // No backup required if crypto doesn't exist/no found + log.Info(fmt.Sprintf("No TLS or ecert crypto found for %s, not performing backup", instance.GetName())) + return nil + } + + crypto := &Crypto{ + TLS: tlsCrypto, + Ecert: ecertCrypto, + } + + return backupCrypto(client, scheme, instance, labels, crypto) +} + +func BackupCACrypto(client k8sclient.Client, scheme *runtime.Scheme, instance v1.Object, labels map[string]string) error { + caCrypto, operationsCrypto, tlsCrypto := GetCACrypto(client, instance) + + if caCrypto == nil && operationsCrypto == nil && tlsCrypto == nil { + log.Info(fmt.Sprintf("No crypto found for %s, not performing backup", instance.GetName())) + return nil + } + + crypto := &Crypto{ + CA: caCrypto, + Operations: operationsCrypto, + TLS: tlsCrypto, + } + return backupCrypto(client, scheme, instance, labels, crypto) +} + +func backupCrypto(client k8sclient.Client, scheme *runtime.Scheme, instance v1.Object, labels map[string]string, crypto *Crypto) error { + backupSecret, err := GetBackupSecret(client, instance) + if err != nil { + if k8serrors.IsNotFound(err) { + // Create secret + data, err := CreateBackupSecretData(crypto) + if err != nil { + return errors.Wrap(err, "failed to create backup secret data") + } + + newSecret := &corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Name: fmt.Sprintf("%s-crypto-backup", instance.GetName()), + Namespace: instance.GetNamespace(), + Labels: labels, + }, + Data: data, + Type: corev1.SecretTypeOpaque, + } + + err = CreateBackupSecret(client, scheme, instance, newSecret) + if err != nil { + return errors.Wrap(err, "failed to create backup secret") + } + return nil + } + return errors.Wrap(err, "failed to get backup secret") + } + + // Update secret + data, err := UpdateBackupSecretData(backupSecret.Data, crypto) + if err != nil { + return errors.Wrap(err, "failed to update backup secret data") + } + backupSecret.Data = data + + err = UpdateBackupSecret(client, scheme, instance, backupSecret) + if err != nil { + return errors.Wrap(err, "failed to update backup secret") + } + + return nil +} + +func CreateBackupSecretData(crypto *Crypto) (map[string][]byte, error) { + data := map[string][]byte{} + + if crypto.TLS != nil { + tlsBackup := &Backup{ + List: []*current.MSP{crypto.TLS}, + Timestamp: time.Now().String(), + } + tlsBytes, err := json.Marshal(tlsBackup) + if err != nil { + 
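backupCrypto above follows a standard create-or-update flow: Get the "<name>-crypto-backup" secret, Create it when the error is NotFound, otherwise merge the new data in and Update. A condensed sketch of that control flow against the plain controller-runtime client; the operator's own client wrapper also attaches owner references, which is omitted here:

package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// createOrUpdateBackup writes data into "<name>-crypto-backup", creating the
// secret if it does not exist yet and merging into it otherwise.
func createOrUpdateBackup(ctx context.Context, c client.Client, name, namespace string, data map[string][]byte) error {
	secret := &corev1.Secret{}
	key := types.NamespacedName{Name: name + "-crypto-backup", Namespace: namespace}

	err := c.Get(ctx, key, secret)
	if k8serrors.IsNotFound(err) {
		secret = &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{Name: key.Name, Namespace: key.Namespace},
			Data:       data,
			Type:       corev1.SecretTypeOpaque,
		}
		return c.Create(ctx, secret)
	}
	if err != nil {
		return err
	}

	if secret.Data == nil {
		secret.Data = map[string][]byte{}
	}
	for k, v := range data {
		secret.Data[k] = v
	}
	return c.Update(ctx, secret)
}

func main() {} // call with a real or fake client.Client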
return nil, err + } + data["tls-backup.json"] = tlsBytes + } + + if crypto.Ecert != nil { + ecertBackup := &Backup{ + List: []*current.MSP{crypto.Ecert}, + Timestamp: time.Now().String(), + } + ecertBytes, err := json.Marshal(ecertBackup) + if err != nil { + return nil, err + } + data["ecert-backup.json"] = ecertBytes + } + + if crypto.Operations != nil { + opBackup := &Backup{ + List: []*current.MSP{crypto.Operations}, + Timestamp: time.Now().String(), + } + opBytes, err := json.Marshal(opBackup) + if err != nil { + return nil, err + } + data["operations-backup.json"] = opBytes + } + + if crypto.CA != nil { + caBackup := &Backup{ + List: []*current.MSP{crypto.CA}, + Timestamp: time.Now().String(), + } + caBytes, err := json.Marshal(caBackup) + if err != nil { + return nil, err + } + data["ca-backup.json"] = caBytes + } + + return data, nil +} + +func UpdateBackupSecretData(data map[string][]byte, crypto *Crypto) (map[string][]byte, error) { + if crypto.TLS != nil { + tlsBackup, err := getUpdatedBackup(data["tls-backup.json"], crypto.TLS) + if err != nil { + return nil, err + } + tlsBytes, err := json.Marshal(tlsBackup) + if err != nil { + return nil, err + } + data["tls-backup.json"] = tlsBytes + } + + if crypto.Ecert != nil { + ecertBackup, err := getUpdatedBackup(data["ecert-backup.json"], crypto.Ecert) + if err != nil { + return nil, err + } + ecertBytes, err := json.Marshal(ecertBackup) + if err != nil { + return nil, err + } + data["ecert-backup.json"] = ecertBytes + } + + if crypto.Operations != nil { + opBackup, err := getUpdatedBackup(data["operations-backup.json"], crypto.Operations) + if err != nil { + return nil, err + } + opBytes, err := json.Marshal(opBackup) + if err != nil { + return nil, err + } + data["operations-backup.json"] = opBytes + } + + if crypto.CA != nil { + caBackup, err := getUpdatedBackup(data["ca-backup.json"], crypto.CA) + if err != nil { + return nil, err + } + caBytes, err := json.Marshal(caBackup) + if err != nil { + return nil, err + } + data["ca-backup.json"] = caBytes + } + + return data, nil +} + +func getUpdatedBackup(data []byte, crypto *current.MSP) (*Backup, error) { + backup := &Backup{} + if data != nil { + err := json.Unmarshal(data, backup) + if err != nil { + return nil, err + } + + if len(backup.List) < ITERATIONS { + // Insert to back of queue + backup.List = append(backup.List, crypto) + } else { + // Remove oldest backup and insert new crypto + backup.List = append(backup.List[1:], crypto) + } + } else { + // Create backup + backup.List = []*current.MSP{crypto} + } + + backup.Timestamp = time.Now().String() + + return backup, nil +} + +func GetCrypto(prefix common.SecretType, client k8sclient.Client, instance v1.Object) *current.MSP { + var cryptoExists bool + + // Doesn't return error if can't get secret/secret not found + signcert, err := getSignCertEncoded(prefix, client, instance) + if err == nil && signcert != "" { + cryptoExists = true + } + + keystore, err := getKeystoreEncoded(prefix, client, instance) + if err == nil && keystore != "" { + cryptoExists = true + } + + cacerts, err := getCACertEncoded(prefix, client, instance) + if err == nil && cacerts != nil { + cryptoExists = true + } + + admincerts, err := getAdmincertEncoded(prefix, client, instance) + if err == nil && admincerts != nil { + cryptoExists = true + } + + intercerts, err := getIntermediateCertEncoded(prefix, client, instance) + if err == nil && intercerts != nil { + cryptoExists = true + } + + if cryptoExists { + return ¤t.MSP{ + SignCerts: signcert, + KeyStore: 
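getUpdatedBackup above keeps at most ITERATIONS (10) entries per backup key and evicts the oldest entry when the window is full, which is what the "10 most recent backups" test later asserts. The bounded append at the heart of it, isolated:

package main

import "fmt"

const maxBackups = 10 // mirrors the ITERATIONS constant above

// appendBounded adds item to the end of list, evicting the oldest entry once
// the window is full.
func appendBounded(list []string, item string) []string {
	if len(list) < maxBackups {
		return append(list, item)
	}
	return append(list[1:], item)
}

func main() {
	var list []string
	for i := 1; i <= 12; i++ {
		list = appendBounded(list, fmt.Sprintf("backup-%d", i))
	}
	fmt.Println(len(list), list[0], list[len(list)-1]) // 10 backup-3 backup-12
}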
keystore, + CACerts: cacerts, + AdminCerts: admincerts, + IntermediateCerts: intercerts, + } + } + + return nil +} + +func GetCACrypto(client k8sclient.Client, instance v1.Object) (*current.MSP, *current.MSP, *current.MSP) { + encoded, err := GetCACryptoEncoded(client, instance) + if err != nil || encoded == nil { + return nil, nil, nil + } + + caMSP := ¤t.MSP{ + SignCerts: encoded.Cert, + KeyStore: encoded.Key, + } + + operationsMSP := ¤t.MSP{ + SignCerts: encoded.OperationsCert, + KeyStore: encoded.OperationsKey, + } + + tlsMSP := ¤t.MSP{ + SignCerts: encoded.TLSCert, + KeyStore: encoded.TLSKey, + } + + return caMSP, operationsMSP, tlsMSP +} + +func GetBackupSecret(client k8sclient.Client, instance v1.Object) (*corev1.Secret, error) { + secretName := fmt.Sprintf("%s-crypto-backup", instance.GetName()) + namespacedName := types.NamespacedName{ + Name: secretName, + Namespace: instance.GetNamespace(), + } + + secret := &corev1.Secret{} + err := client.Get(context.TODO(), namespacedName, secret) + if err != nil { + return nil, err + } + + return secret, nil +} + +func CreateBackupSecret(client k8sclient.Client, scheme *runtime.Scheme, instance v1.Object, secret *corev1.Secret) error { + err := client.Create(context.TODO(), secret, k8sclient.CreateOption{ + Owner: instance, + Scheme: scheme, + }) + if err != nil { + return err + } + return nil +} + +func UpdateBackupSecret(client k8sclient.Client, scheme *runtime.Scheme, instance v1.Object, secret *corev1.Secret) error { + err := client.Update(context.TODO(), secret, k8sclient.UpdateOption{ + Owner: instance, + Scheme: scheme, + }) + if err != nil { + return err + } + return nil +} diff --git a/pkg/offering/common/common_suite_test.go b/pkg/offering/common/common_suite_test.go new file mode 100644 index 00000000..9dbd6afc --- /dev/null +++ b/pkg/offering/common/common_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package common_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestCommon(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Common Suite") +} diff --git a/pkg/offering/common/common_test.go b/pkg/offering/common/common_test.go new file mode 100644 index 00000000..1550105b --- /dev/null +++ b/pkg/offering/common/common_test.go @@ -0,0 +1,346 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
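The backup secret stores one JSON document per crypto type ("tls-backup.json", "ecert-backup.json", and so on), each holding a timestamped list with the newest entry last. A small sketch of reading the most recent TLS entry back out of such a secret's Data map; the struct shapes and JSON tags are simplified stand-ins for common.Backup and current.MSP:

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// msp and backup are simplified stand-ins for the types used above.
type msp struct {
	SignCerts string `json:"signcerts"`
}

type backup struct {
	List      []*msp `json:"list"`
	Timestamp string `json:"timestamp"`
}

// latestTLSBackup returns the newest entry from "tls-backup.json", or nil if
// the key is missing or the list is empty.
func latestTLSBackup(data map[string][]byte) (*msp, error) {
	raw, ok := data["tls-backup.json"]
	if !ok {
		return nil, nil
	}
	b := &backup{}
	if err := json.Unmarshal(raw, b); err != nil {
		return nil, err
	}
	if len(b.List) == 0 {
		return nil, nil
	}
	return b.List[len(b.List)-1], nil
}

func main() {
	raw, _ := json.Marshal(&backup{
		List:      []*msp{{SignCerts: "old"}, {SignCerts: "new"}},
		Timestamp: time.Now().String(),
	})
	latest, _ := latestTLSBackup(map[string][]byte{"tls-backup.json": raw})
	fmt.Println(latest.SignCerts) // new
}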
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package common_test + +import ( + "context" + "encoding/base64" + "encoding/json" + "errors" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common" +) + +var _ = Describe("Common", func() { + + const testcert = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNwVENDQWtxZ0F3SUJBZ0lSQU1FeVZVcDRMdlYydEFUREhlWklldDh3Q2dZSUtvWkl6ajBFQXdJd2daVXgKQ3pBSkJnTlZCQVlUQWxWVE1SY3dGUVlEVlFRSUV3NU9iM0owYUNCRFlYSnZiR2x1WVRFUE1BMEdBMVVFQnhNRwpSSFZ5YUdGdE1Rd3dDZ1lEVlFRS0V3TkpRazB4RXpBUkJnTlZCQXNUQ2tKc2IyTnJZMmhoYVc0eE9UQTNCZ05WCkJBTVRNR3BoYmpJeUxXOXlaR1Z5WlhKdmNtZGpZUzFqWVM1aGNIQnpMbkIxYldGekxtOXpMbVo1Y21VdWFXSnQKTG1OdmJUQWVGdzB5TURBeE1qSXhPREExTURCYUZ3MHpNREF4TVRreE9EQTFNREJhTUlHVk1Rc3dDUVlEVlFRRwpFd0pWVXpFWE1CVUdBMVVFQ0JNT1RtOXlkR2dnUTJGeWIyeHBibUV4RHpBTkJnTlZCQWNUQmtSMWNtaGhiVEVNCk1Bb0dBMVVFQ2hNRFNVSk5NUk13RVFZRFZRUUxFd3BDYkc5amEyTm9ZV2x1TVRrd053WURWUVFERXpCcVlXNHkKTWkxdmNtUmxjbVZ5YjNKblkyRXRZMkV1WVhCd2N5NXdkVzFoY3k1dmN5NW1lWEpsTG1saWJTNWpiMjB3V1RBVApCZ2NxaGtqT1BRSUJCZ2dxaGtqT1BRTUJCd05DQUFTR0lHUFkvZC9tQVhMejM4SlROR3F5bldpOTJXUVB6cnN0Cm5vdEFWZlh0dHZ5QWJXdTRNbWNUMEh6UnBTWjNDcGdxYUNXcTg1MUwyV09LcnZ6L0JPREpvM2t3ZHpCMUJnTlYKSFJFRWJqQnNnakJxWVc0eU1pMXZjbVJsY21WeWIzSm5ZMkV0WTJFdVlYQndjeTV3ZFcxaGN5NXZjeTVtZVhKbApMbWxpYlM1amIyMkNPR3BoYmpJeUxXOXlaR1Z5WlhKdmNtZGpZUzF2Y0dWeVlYUnBiMjV6TG1Gd2NITXVjSFZ0CllYTXViM011Wm5seVpTNXBZbTB1WTI5dE1Bb0dDQ3FHU000OUJBTUNBMGtBTUVZQ0lRQzM3Y1pkNFY2RThPQ1IKaDloQXEyK0dyR21FVTFQU0I1eHo5RkdEWThkODZRSWhBT1crM3Urb2d4bFNWNUoyR3ZYbHRaQmpXRkpvYnJxeApwVVQ4cW4yMDA1b0wKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo" + + var ( + mockKubeClient *mocks.Client + instance *current.IBPPeer + + crypto1 *current.MSP + crypto2 *current.MSP + crypto3 *current.MSP + + encodedtestcert string + + backupData map[string][]byte + ) + + BeforeEach(func() { + mockKubeClient = &mocks.Client{} + + instance = ¤t.IBPPeer{} + instance.Name = "peer1" + + crypto1 = ¤t.MSP{SignCerts: "signcert1"} + crypto2 = ¤t.MSP{SignCerts: "signcert2"} + crypto3 = ¤t.MSP{SignCerts: "signcert3"} + + backup := &common.Backup{ + List: []*current.MSP{crypto1}, + Timestamp: time.Now().String(), + } + backupBytes, err := json.Marshal(backup) + Expect(err).NotTo(HaveOccurred()) + + backupData = map[string][]byte{ + "tls-backup.json": backupBytes, + "ecert-backup.json": backupBytes, + } + + mockKubeClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj client.Object) error { + o := obj.(*corev1.Secret) + switch types.Name { + case "tls-" + instance.Name + "-signcert": + o.Name = "tls-" + instance.Name + "-signcert" + o.Namespace = instance.Namespace + o.Data = map[string][]byte{"cert.pem": []byte(testcert)} + case "tls-" + instance.Name + "-keystore": + o.Name = "tls-" + 
instance.Name + "-keystore" + o.Namespace = instance.Namespace + o.Data = map[string][]byte{"key.pem": []byte(testcert)} + case "tls-" + instance.Name + "-cacerts": + o.Name = "tls-" + instance.Name + "-cacerts" + o.Namespace = instance.Namespace + o.Data = map[string][]byte{"key.pem": []byte(testcert)} + case "ecert-" + instance.Name + "-signcert": + o.Name = "ecert-" + instance.Name + "-signcert" + o.Namespace = instance.Namespace + o.Data = map[string][]byte{"cert.pem": []byte(testcert)} + case "ecert-" + instance.Name + "-cacerts": + o.Name = "ecert-" + instance.Name + "-cacerts" + o.Namespace = instance.Namespace + o.Data = map[string][]byte{"cacert-0.pem": []byte(testcert)} + case "peer1-crypto-backup": + o.Name = instance.Name + "-crypto-backup" + o.Namespace = instance.Namespace + o.Data = backupData + case "ca1-ca-crypto": + o.Name = instance.Name + "-ca-crypto" + o.Namespace = instance.Namespace + o.Data = map[string][]byte{ + "tls-cert.pem": []byte(testcert), + "tls-key.pem": []byte(testcert), + "cert.pem": []byte(testcert), + "key.pem": []byte(testcert), + "operations-cert.pem": []byte(testcert), + "operations-key.pem": []byte(testcert), + } + case "ca1-crypto-backup": + o.Name = "ca1-crypto-backup" + o.Namespace = instance.Namespace + o.Data = backupData + } + return nil + } + + encodedtestcert = base64.StdEncoding.EncodeToString([]byte(testcert)) + }) + + Context("backup crypto", func() { + + Context("get crypto", func() { + It("returns nil if fails to get secret", func() { + mockKubeClient.GetReturns(errors.New("get error")) + crypto := common.GetCrypto("tls", mockKubeClient, instance) + Expect(crypto).To(BeNil()) + }) + + It("returns nil if no secrets are found", func() { + mockKubeClient.GetReturns(k8serrors.NewNotFound(schema.GroupResource{}, "not found")) + crypto := common.GetCrypto("tls", mockKubeClient, instance) + Expect(crypto).To(BeNil()) + }) + + It("returns tls crypto", func() { + crypto := common.GetCrypto("tls", mockKubeClient, instance) + Expect(crypto).NotTo(BeNil()) + Expect(crypto).To(Equal(¤t.MSP{ + SignCerts: encodedtestcert, + KeyStore: encodedtestcert, + CACerts: []string{encodedtestcert}, + })) + }) + + It("returns ecert crypto", func() { + crypto := common.GetCrypto("ecert", mockKubeClient, instance) + Expect(crypto).NotTo(BeNil()) + Expect(crypto).To(Equal(¤t.MSP{ + SignCerts: encodedtestcert, + CACerts: []string{encodedtestcert}, + })) + }) + }) + + Context("udpate secret data", func() { + var ( + data map[string][]byte + ) + + BeforeEach(func() { + + backup := &common.Backup{ + List: []*current.MSP{crypto1}, + Timestamp: time.Now().String(), + } + backupBytes, err := json.Marshal(backup) + Expect(err).NotTo(HaveOccurred()) + + data = map[string][]byte{ + "tls-backup.json": backupBytes, + "ecert-backup.json": backupBytes, + } + }) + + It("adds crypto to backup list", func() { + crypto := &common.Crypto{ + TLS: crypto2, + Ecert: crypto2, + } + updatedData, err := common.UpdateBackupSecretData(data, crypto) + Expect(err).NotTo(HaveOccurred()) + + By("updating tls backup", func() { + tlsbackup := &common.Backup{} + err = json.Unmarshal(updatedData["tls-backup.json"], tlsbackup) + Expect(err).NotTo(HaveOccurred()) + Expect(tlsbackup.List).To(Equal([]*current.MSP{crypto1, crypto2})) + Expect(tlsbackup.Timestamp).NotTo(Equal("")) + }) + + By("updating ecert backup", func() { + ecertbackup := &common.Backup{} + err = json.Unmarshal(updatedData["ecert-backup.json"], ecertbackup) + Expect(err).NotTo(HaveOccurred()) + 
Expect(ecertbackup.List).To(Equal([]*current.MSP{crypto1, crypto2})) + Expect(ecertbackup.Timestamp).NotTo(Equal("")) + }) + }) + + It("removes oldest crypto from queue and inserts new crypto if list is longer than 10", func() { + backup := &common.Backup{ + List: []*current.MSP{crypto1, crypto1, crypto1, crypto1, crypto1, crypto1, crypto1, crypto1, crypto1, crypto2}, + Timestamp: time.Now().String(), + } + backupBytes, err := json.Marshal(backup) + Expect(err).NotTo(HaveOccurred()) + + data = map[string][]byte{ + "tls-backup.json": backupBytes, + "ecert-backup.json": backupBytes, + } + + crypto := &common.Crypto{ + TLS: crypto3, + Ecert: crypto3, + } + updatedData, err := common.UpdateBackupSecretData(data, crypto) + Expect(err).NotTo(HaveOccurred()) + + By("updating tls backup to contain 10 most recent backups", func() { + tlsbackup := &common.Backup{} + err = json.Unmarshal(updatedData["tls-backup.json"], tlsbackup) + Expect(err).NotTo(HaveOccurred()) + Expect(tlsbackup.List).To(Equal([]*current.MSP{crypto1, crypto1, crypto1, crypto1, crypto1, crypto1, crypto1, crypto1, crypto2, crypto3})) + Expect(tlsbackup.Timestamp).NotTo(Equal("")) + }) + + By("updating ecert backup to contain 10 most recent backups", func() { + ecertbackup := &common.Backup{} + err = json.Unmarshal(updatedData["ecert-backup.json"], ecertbackup) + Expect(err).NotTo(HaveOccurred()) + Expect(ecertbackup.List).To(Equal([]*current.MSP{crypto1, crypto1, crypto1, crypto1, crypto1, crypto1, crypto1, crypto1, crypto2, crypto3})) + Expect(ecertbackup.Timestamp).NotTo(Equal("")) + }) + }) + }) + + Context("backup crypto", func() { + It("returns nil if neither TLS nor ecert crypto is found", func() { + mockKubeClient.GetReturns(errors.New("get error")) + err := common.BackupCrypto(mockKubeClient, &runtime.Scheme{}, instance, map[string]string{}) + Expect(err).NotTo(HaveOccurred()) + }) + + It("returns error if fails to update backup secret", func() { + mockKubeClient.UpdateReturns(errors.New("create or update error")) + err := common.BackupCrypto(mockKubeClient, &runtime.Scheme{}, instance, map[string]string{}) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to update backup secret: create or update error")) + }) + + It("updates backup secret if one exists for instance", func() { + err := common.BackupCrypto(mockKubeClient, &runtime.Scheme{}, instance, map[string]string{}) + Expect(err).NotTo(HaveOccurred()) + Expect(mockKubeClient.UpdateCallCount()).To(Equal(1)) + + newCrypto := ¤t.MSP{ + SignCerts: encodedtestcert, + KeyStore: encodedtestcert, + CACerts: []string{encodedtestcert}, + } + + By("updating tls backup", func() { + tlsbackup := &common.Backup{} + err = json.Unmarshal(backupData["tls-backup.json"], tlsbackup) + Expect(err).NotTo(HaveOccurred()) + Expect(tlsbackup.List).To(Equal([]*current.MSP{crypto1, newCrypto})) + Expect(tlsbackup.Timestamp).NotTo(Equal("")) + }) + + By("updating ecert backup", func() { + newCrypto.KeyStore = "" + ecertbackup := &common.Backup{} + err = json.Unmarshal(backupData["ecert-backup.json"], ecertbackup) + Expect(err).NotTo(HaveOccurred()) + Expect(ecertbackup.List).To(Equal([]*current.MSP{crypto1, newCrypto})) + Expect(ecertbackup.Timestamp).NotTo(Equal("")) + }) + }) + }) + + Context("backup CA crypto", func() { + var ( + instance *current.IBPCA + ) + + BeforeEach(func() { + instance = ¤t.IBPCA{} + instance.Name = "ca1" + }) + + It("returns nil if CA TLS crypto is not found", func() { + mockKubeClient.GetReturns(errors.New("get error")) + err := 
common.BackupCACrypto(mockKubeClient, &runtime.Scheme{}, instance, map[string]string{}) + Expect(err).NotTo(HaveOccurred()) + }) + + It("updates backup secret if one exists for instance", func() { + err := common.BackupCACrypto(mockKubeClient, &runtime.Scheme{}, instance, map[string]string{}) + Expect(err).NotTo(HaveOccurred()) + Expect(mockKubeClient.UpdateCallCount()).To(Equal(1)) + + newCrypto := ¤t.MSP{ + SignCerts: encodedtestcert, + KeyStore: encodedtestcert, + } + + By("updating tls backup", func() { + tlsbackup := &common.Backup{} + err = json.Unmarshal(backupData["tls-backup.json"], tlsbackup) + Expect(err).NotTo(HaveOccurred()) + Expect(tlsbackup.List).To(Equal([]*current.MSP{crypto1, newCrypto})) + Expect(tlsbackup.Timestamp).NotTo(Equal("")) + }) + + By("creating operations backup", func() { + opbackup := &common.Backup{} + err = json.Unmarshal(backupData["operations-backup.json"], opbackup) + Expect(err).NotTo(HaveOccurred()) + Expect(opbackup.List).To(Equal([]*current.MSP{newCrypto})) + Expect(opbackup.Timestamp).NotTo(Equal("")) + }) + + By("creating ca backup", func() { + cabackup := &common.Backup{} + err = json.Unmarshal(backupData["ca-backup.json"], cabackup) + Expect(err).NotTo(HaveOccurred()) + Expect(cabackup.List).To(Equal([]*current.MSP{newCrypto})) + Expect(cabackup.Timestamp).NotTo(Equal("")) + }) + }) + }) + + }) + +}) diff --git a/pkg/offering/common/override.go b/pkg/offering/common/override.go new file mode 100644 index 00000000..29a4e941 --- /dev/null +++ b/pkg/offering/common/override.go @@ -0,0 +1,108 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package common + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func GetNodeSelectorTerms(arch []string, zone, region string) []corev1.NodeSelectorTerm { + nodeSelectorTerms := []corev1.NodeSelectorTerm{ + corev1.NodeSelectorTerm{ + MatchExpressions: []corev1.NodeSelectorRequirement{}, + }, + corev1.NodeSelectorTerm{ + MatchExpressions: []corev1.NodeSelectorRequirement{}, + }, + } + + AddArchSelector(arch, &nodeSelectorTerms) + AddZoneSelector(zone, &nodeSelectorTerms) + AddRegionSelector(region, &nodeSelectorTerms) + + return nodeSelectorTerms +} + +func AddArchSelector(arch []string, nodeSelectorTerms *[]corev1.NodeSelectorTerm) { + if len(arch) != 0 { + archNode := corev1.NodeSelectorRequirement{ + Key: "kubernetes.io/arch", + Operator: corev1.NodeSelectorOpIn, + Values: arch, + } + (*nodeSelectorTerms)[0].MatchExpressions = append((*nodeSelectorTerms)[0].MatchExpressions, archNode) + } +} + +func AddZoneSelector(zone string, nodeSelectorTerms *[]corev1.NodeSelectorTerm) { + zoneNode := corev1.NodeSelectorRequirement{ + Key: "topology.kubernetes.io/zone", + Operator: corev1.NodeSelectorOpIn, + } + zoneNodeOld := corev1.NodeSelectorRequirement{ + Key: "failure-domain.beta.kubernetes.io/zone", + Operator: corev1.NodeSelectorOpIn, + } + if zone != "" { + zoneNode.Values = []string{zone} + zoneNodeOld.Values = []string{zone} + (*nodeSelectorTerms)[0].MatchExpressions = append((*nodeSelectorTerms)[0].MatchExpressions, zoneNode) + (*nodeSelectorTerms)[1].MatchExpressions = append((*nodeSelectorTerms)[1].MatchExpressions, zoneNodeOld) + } +} + +func AddRegionSelector(region string, nodeSelectorTerms *[]corev1.NodeSelectorTerm) { + regionNode := corev1.NodeSelectorRequirement{ + Key: "topology.kubernetes.io/region", + Operator: corev1.NodeSelectorOpIn, + } + regionNodeOld := corev1.NodeSelectorRequirement{ + Key: "failure-domain.beta.kubernetes.io/region", + Operator: corev1.NodeSelectorOpIn, + } + if region != "" { + regionNode.Values = []string{region} + regionNodeOld.Values = []string{region} + (*nodeSelectorTerms)[0].MatchExpressions = append((*nodeSelectorTerms)[0].MatchExpressions, regionNode) + (*nodeSelectorTerms)[1].MatchExpressions = append((*nodeSelectorTerms)[1].MatchExpressions, regionNodeOld) + } +} + +func GetPodAntiAffinity(orgName string) *corev1.PodAntiAffinity { + return &corev1.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{ + corev1.WeightedPodAffinityTerm{ + Weight: 100, + PodAffinityTerm: corev1.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + metav1.LabelSelectorRequirement{ + Key: "orgname", + Operator: metav1.LabelSelectorOpIn, + Values: []string{orgName}, + }, + }, + }, + TopologyKey: "kubernetes.io/hostname", + }, + }, + }, + } +} diff --git a/pkg/offering/common/reconcilechecks/fabricversion.go b/pkg/offering/common/reconcilechecks/fabricversion.go new file mode 100644 index 00000000..8816e5bf --- /dev/null +++ b/pkg/offering/common/reconcilechecks/fabricversion.go @@ -0,0 +1,137 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
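GetNodeSelectorTerms above emits two parallel terms so that both the current topology.kubernetes.io labels and the legacy failure-domain.beta labels can match. A sketch of where such terms and the pod anti-affinity helper would plug into a pod spec; whether they are attached as required or preferred scheduling constraints is an assumption here, not something shown above:

package main

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// buildAffinity attaches node selector terms (for example the output of
// GetNodeSelectorTerms) and an optional pod anti-affinity (GetPodAntiAffinity)
// to a corev1.Affinity value.
func buildAffinity(terms []corev1.NodeSelectorTerm, anti *corev1.PodAntiAffinity) *corev1.Affinity {
	return &corev1.Affinity{
		NodeAffinity: &corev1.NodeAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
				NodeSelectorTerms: terms,
			},
		},
		PodAntiAffinity: anti,
	}
}

func main() {
	terms := []corev1.NodeSelectorTerm{{
		MatchExpressions: []corev1.NodeSelectorRequirement{{
			Key:      "kubernetes.io/arch",
			Operator: corev1.NodeSelectorOpIn,
			Values:   []string{"amd64"},
		}},
	}}
	out, _ := json.MarshalIndent(buildAffinity(terms, nil), "", "  ")
	fmt.Println(string(out))
}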
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package reconcilechecks + +import ( + "errors" + "fmt" + + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/deployer" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common/reconcilechecks/images" + "github.com/IBM-Blockchain/fabric-operator/version" + + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var log = logf.Log.WithName("reconcile_checks") + +//go:generate counterfeiter -o mocks/instance.go -fake-name Instance . Instance + +// Instance is an instance of an IBP custom resource +type Instance interface { + GetArch() []string + GetRegistryURL() string + GetFabricVersion() string + SetFabricVersion(string) + ImagesSet() bool +} + +//go:generate counterfeiter -o mocks/update.go -fake-name Update . Update + +// Update defines update events we are interested in +type Update interface { + ImagesUpdated() bool + FabricVersionUpdated() bool +} + +// FabricVersionHelper is a helper function meant to be consumed by the different controllers to handle +// events on fabric version and images in specs +func FabricVersionHelper(instance Instance, versions *deployer.Versions, update Update) (bool, error) { + image := &images.Image{ + Versions: versions, + // DefaultRegistryURL: "hyperledger", // changing default for OSS + DefaultArch: "amd64", + } + + fv := &images.FabricVersion{ + Versions: versions, + } + + return FabricVersion(instance, update, image, fv) +} + +//go:generate counterfeiter -o mocks/image.go -fake-name Image . Image +// Image defines the contract with the image checks +type Image interface { + UpdateRequired(images.Update) bool + SetDefaults(images.Instance) error +} + +//go:generate counterfeiter -o mocks/version.go -fake-name Version . Version +// Version defines the contract with the version checks +type Version interface { + Normalize(images.FabricVersionInstance) string + Validate(images.FabricVersionInstance) error +} + +// FabricVersion is a lower-level call that requires all dependencies to be injected to handle +// events on fabric version and images in specs. It returns back two values, the first return +// value indicates if a spec change has been made. The second return value returns an error. 
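FabricVersionHelper above is the entry point the individual controllers are meant to call during their pre-reconcile checks, and FabricVersion, defined immediately below, is the injectable core it delegates to. A hedged sketch of what a call site could look like; the function and type names introduced here are illustrative and not taken from a specific controller:

package main

import (
	"fmt"

	"github.com/IBM-Blockchain/fabric-operator/pkg/apis/deployer"
	"github.com/IBM-Blockchain/fabric-operator/pkg/offering/common/reconcilechecks"
)

// specUpdate is a minimal stand-in showing how a controller's update tracker
// can satisfy the Update contract declared above.
type specUpdate struct{ imagesChanged, versionChanged bool }

func (u specUpdate) ImagesUpdated() bool        { return u.imagesChanged }
func (u specUpdate) FabricVersionUpdated() bool { return u.versionChanged }

var _ reconcilechecks.Update = specUpdate{}

// preReconcileVersionCheck shows the shape of a call site: run the helper,
// surface errors, and requeue when the spec was normalized. The instance and
// versions would come from the controller's CR and the operator config.
func preReconcileVersionCheck(instance reconcilechecks.Instance, versions *deployer.Versions, update reconcilechecks.Update) (bool, error) {
	requeue, err := reconcilechecks.FabricVersionHelper(instance, versions, update)
	if err != nil {
		return false, fmt.Errorf("version/image check failed: %w", err)
	}
	return requeue, nil
}

func main() {} // a controller wires this up with its CR, config versions, and update tracker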
+func FabricVersion(instance Instance, update Update, image Image, fv Version) (bool, error) { + var requeue bool + + fabricVersion := instance.GetFabricVersion() + if fabricVersion == "" { + return false, errors.New("fabric version is not set") + } + + // If the fabric version changed (except during migration), or the images section is blank, then + // look up the default images associated with the fabric version and update the images in the instance's spec + if update.FabricVersionUpdated() { + + // If the fabric version update is triggered by migration of the operator, then no changes are required + if version.IsMigratedFabricVersion(instance.GetFabricVersion()) { + return false, nil + } + + log.Info(fmt.Sprintf("Images to be updated, fabric version changed, new fabric version is '%s'", fabricVersion)) + } + + if !instance.ImagesSet() { + log.Info(fmt.Sprintf("Images missing, setting to default images based on fabric version '%s'", fabricVersion)) + } + + // If images are set, further processing is needed to determine whether the images need to be updated (overridden) based on events + // detected on the fabric version + if instance.ImagesSet() { + required := image.UpdateRequired(update) + + if !required { + return false, nil + } + } + + // Normalize version to x.x.x-x + normalizedVersion := fv.Normalize(instance) + if instance.GetFabricVersion() != normalizedVersion { + instance.SetFabricVersion(normalizedVersion) + requeue = true + } + + if err := fv.Validate(instance); err != nil { + return false, err + } + + if err := image.SetDefaults(instance); err != nil { + return false, err + } + requeue = true + + return requeue, nil + } diff --git a/pkg/offering/common/reconcilechecks/fabricversion_test.go b/pkg/offering/common/reconcilechecks/fabricversion_test.go new file mode 100644 index 00000000..aed44f7e --- /dev/null +++ b/pkg/offering/common/reconcilechecks/fabricversion_test.go @@ -0,0 +1,200 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package reconcilechecks_test + +import ( + "errors" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common/reconcilechecks" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common/reconcilechecks/mocks" +) + +var _ = Describe("fabric version", func() { + var ( + instance *mocks.Instance + update *mocks.Update + image *mocks.Image + fv *mocks.Version + ) + + BeforeEach(func() { + instance = &mocks.Instance{} + update = &mocks.Update{} + image = &mocks.Image{} + fv = &mocks.Version{} + }) + + Context("create CR", func() { + It("returns an error if fabric version is not set in spec", func() { + _, err := reconcilechecks.FabricVersion(instance, update, image, fv) + Expect(err).To(MatchError(ContainSubstring("fabric version is not set"))) + }) + + Context("images section blank", func() { + It("normalizes fabric version and requests a requeue", func() { + instance.GetFabricVersionReturns("1.4.9") + requeue, err := reconcilechecks.FabricVersion(instance, update, image, fv) + Expect(err).NotTo(HaveOccurred()) + Expect(requeue).To(Equal(true)) + Expect(fv.NormalizeCallCount()).To(Equal(1)) + Expect(instance.SetFabricVersionCallCount()).To(Equal(1)) + }) + + It("returns an error if fabric version not supported", func() { + instance.GetFabricVersionReturns("0.0.1") + fv.ValidateReturns(errors.New("not supported")) + _, err := reconcilechecks.FabricVersion(instance, update, image, fv) + Expect(err).To(MatchError(ContainSubstring("not supported"))) + }) + + When("version is passed without hyphen", func() { + BeforeEach(func() { + instance.GetFabricVersionReturns("1.4.9") + }) + + It("finds default version for release and updates images section", func() { + requeue, err := reconcilechecks.FabricVersion(instance, update, image, fv) + Expect(err).NotTo(HaveOccurred()) + Expect(requeue).To(Equal(true)) + Expect(image.SetDefaultsCallCount()).To(Equal(1)) + }) + }) + + When("version is passed with hyphen", func() { + BeforeEach(func() { + instance.GetFabricVersionReturns("1.4.9-0") + }) + + It("looks images and updates images section", func() { + requeue, err := reconcilechecks.FabricVersion(instance, update, image, fv) + Expect(err).NotTo(HaveOccurred()) + Expect(requeue).To(Equal(true)) + Expect(image.SetDefaultsCallCount()).To(Equal(1)) + }) + }) + }) + + Context("images section passed", func() { + BeforeEach(func() { + instance.ImagesSetReturns(true) + }) + + When("version is not passed", func() { + It("returns an error", func() { + _, err := reconcilechecks.FabricVersion(instance, update, image, fv) + Expect(err).To(MatchError(ContainSubstring("fabric version is not set"))) + }) + }) + + When("version is passed", func() { + BeforeEach(func() { + instance.GetFabricVersionReturns("2.0.0-8") + }) + + It("persists current spec configuration", func() { + requeue, err := reconcilechecks.FabricVersion(instance, update, image, fv) + Expect(err).NotTo(HaveOccurred()) + Expect(requeue).To(Equal(false)) + Expect(instance.SetFabricVersionCallCount()).To(Equal(0)) + Expect(fv.NormalizeCallCount()).To(Equal(0)) + Expect(fv.ValidateCallCount()).To(Equal(0)) + Expect(image.SetDefaultsCallCount()).To(Equal(0)) + }) + }) + }) + }) + + Context("update CR", func() { + BeforeEach(func() { + instance.GetFabricVersionReturns("2.0.1-0") + instance.ImagesSetReturns(true) + }) + + When("images updated", func() { + BeforeEach(func() { + update.ImagesUpdatedReturns(true) + }) + + Context("and version updated", func() { + BeforeEach(func() { + update.FabricVersionUpdatedReturns(true) + }) + + It("persists current spec configuration", 
func() { + requeue, err := reconcilechecks.FabricVersion(instance, update, image, fv) + Expect(err).NotTo(HaveOccurred()) + Expect(requeue).To(Equal(false)) + Expect(fv.NormalizeCallCount()).To(Equal(0)) + Expect(instance.SetFabricVersionCallCount()).To(Equal(0)) + Expect(fv.ValidateCallCount()).To(Equal(0)) + Expect(image.SetDefaultsCallCount()).To(Equal(0)) + }) + }) + + Context("and version not updated", func() { + It("persists current spec configuration", func() { + requeue, err := reconcilechecks.FabricVersion(instance, update, image, fv) + Expect(err).NotTo(HaveOccurred()) + Expect(requeue).To(Equal(false)) + Expect(fv.NormalizeCallCount()).To(Equal(0)) + Expect(instance.SetFabricVersionCallCount()).To(Equal(0)) + Expect(fv.ValidateCallCount()).To(Equal(0)) + Expect(image.SetDefaultsCallCount()).To(Equal(0)) + }) + }) + }) + + When("images not updated", func() { + Context("and version updated during operator migration", func() { + BeforeEach(func() { + instance.GetFabricVersionReturns("unsupported") + update.FabricVersionUpdatedReturns(true) + }) + + It("persists current spec configuration", func() { + requeue, err := reconcilechecks.FabricVersion(instance, update, image, fv) + Expect(err).NotTo(HaveOccurred()) + Expect(requeue).To(Equal(false)) + Expect(image.UpdateRequiredCallCount()).To(Equal(0)) + Expect(fv.NormalizeCallCount()).To(Equal(0)) + Expect(instance.SetFabricVersionCallCount()).To(Equal(0)) + Expect(fv.ValidateCallCount()).To(Equal(0)) + Expect(image.SetDefaultsCallCount()).To(Equal(0)) + }) + }) + + Context("and version updated not due to operator migration", func() { + BeforeEach(func() { + image.UpdateRequiredReturns(true) + }) + + It("looks images and updates images section", func() { + requeue, err := reconcilechecks.FabricVersion(instance, update, image, fv) + Expect(err).NotTo(HaveOccurred()) + Expect(requeue).To(Equal(true)) + Expect(fv.ValidateCallCount()).To(Equal(1)) + Expect(image.SetDefaultsCallCount()).To(Equal(1)) + }) + }) + }) + }) +}) diff --git a/pkg/offering/common/reconcilechecks/images/fabricversion.go b/pkg/offering/common/reconcilechecks/images/fabricversion.go new file mode 100644 index 00000000..e67d94ab --- /dev/null +++ b/pkg/offering/common/reconcilechecks/images/fabricversion.go @@ -0,0 +1,80 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package images + +import ( + "fmt" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/deployer" +) + +// FabricVersion handles validation on fabric version +type FabricVersion struct { + Versions *deployer.Versions +} + +//go:generate counterfeiter -o mocks/fabricversion.go -fake-name FabricVersionInstance . 
FabricVersionInstance
+
+// FabricVersionInstance defines the contract expected from instances
+type FabricVersionInstance interface {
+	GetFabricVersion() string
+}
+
+// Normalize normalizes the fabric version to x.x.x-x
+func (fv *FabricVersion) Normalize(instance FabricVersionInstance) string {
+	var v interface{}
+
+	switch instance.(type) {
+	case *current.IBPCA:
+		v = fv.Versions.CA
+	case *current.IBPPeer:
+		v = fv.Versions.Peer
+	case *current.IBPOrderer:
+		v = fv.Versions.Orderer
+	}
+
+	return normalizeFabricVersion(instance.GetFabricVersion(), v)
+}
+
+// Validate will iterate through the keys in the versions map and check to
+// see if the version is present (valid)
+func (fv *FabricVersion) Validate(instance FabricVersionInstance) error {
+	fabricVersion := instance.GetFabricVersion()
+
+	switch instance.(type) {
+	case *current.IBPCA:
+		_, found := fv.Versions.CA[fabricVersion]
+		if !found {
+			return fmt.Errorf("fabric version '%s' is not supported for CA", fabricVersion)
+		}
+	case *current.IBPPeer:
+		_, found := fv.Versions.Peer[fabricVersion]
+		if !found {
+			return fmt.Errorf("fabric version '%s' is not supported for Peer", fabricVersion)
+		}
+	case *current.IBPOrderer:
+		_, found := fv.Versions.Orderer[fabricVersion]
+		if !found {
+			return fmt.Errorf("fabric version '%s' is not supported for Orderer", fabricVersion)
+		}
+	}
+
+	return nil
+}
diff --git a/pkg/offering/common/reconcilechecks/images/fabricversion_test.go b/pkg/offering/common/reconcilechecks/images/fabricversion_test.go
new file mode 100644
index 00000000..4a52b949
--- /dev/null
+++ b/pkg/offering/common/reconcilechecks/images/fabricversion_test.go
@@ -0,0 +1,293 @@
+/*
+ * Copyright contributors to the Hyperledger Fabric Operator project
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package images_test
+
+import (
+	. "github.com/onsi/ginkgo"
+	. 
"github.com/onsi/gomega" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/deployer" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common/reconcilechecks/images" +) + +var _ = Describe("fabric version", func() { + var ( + fv *images.FabricVersion + operatorCfg *config.Operator + ) + + BeforeEach(func() { + operatorCfg = &config.Operator{ + Versions: &deployer.Versions{ + CA: map[string]deployer.VersionCA{ + "1.4.9-0": { + Default: false, + Version: "1.4.9-0", + Image: deployer.CAImages{ + CAImage: "caimage", + CATag: "catag", + CAInitImage: "cainitimage", + CAInitTag: "cainittag", + EnrollerImage: "enrolleriamge", + EnrollerTag: "enrollertag", + }, + }, + "1.4.9-1": { + Default: true, + Version: "1.4.9-1", + Image: deployer.CAImages{ + CAImage: "caimage", + CATag: "newcatag", + CAInitImage: "cainitimage", + CAInitTag: "cainittag", + EnrollerImage: "enrolleriamge", + EnrollerTag: "enrollertag", + }, + }, + }, + Peer: map[string]deployer.VersionPeer{ + "1.4.9-0": { + Default: true, + Version: "1.4.9-0", + Image: deployer.PeerImages{ + PeerInitImage: "ibp-init", + PeerInitTag: "2.5.1-2511004-amd64", + PeerImage: "ibp-peer", + PeerTag: "1.4.9-2511004-amd64", + DindImage: "ibp-dind", + DindTag: "noimages-amd64", + CouchDBImage: "ibp-couchdb", + CouchDBTag: "2.3.1-2511004-amd64", + GRPCWebImage: "ibp-grpcweb", + GRPCWebTag: "2.5.1-2511004-amd64", + FluentdImage: "ibp-fluentd", + FluentdTag: "2.5.1-2511004-amd64", + EnrollerImage: "ibp-enroller", + EnrollerTag: "1.0.0-amd64", + }, + }, + "2.2.1-0": { + Default: false, + Version: "2.2.1-0", + Image: deployer.PeerImages{ + PeerInitImage: "ibp-init", + PeerInitTag: "2.5.1-2511004-amd64", + PeerImage: "ibp-peer", + PeerTag: "2.2.1-2511004-amd64", + DindImage: "ibp-dind", + DindTag: "noimages-amd64", + CouchDBImage: "ibp-couchdb", + CouchDBTag: "2.3.1-2511004-amd64", + GRPCWebImage: "ibp-grpcweb", + GRPCWebTag: "2.5.1-2511004-amd64", + FluentdImage: "ibp-fluentd", + FluentdTag: "2.5.1-2511004-amd64", + EnrollerImage: "ibp-enroller", + EnrollerTag: "1.0.0-amd64", + }, + }, + "2.2.1-1": { + Default: true, + Version: "2.2.1-1", + Image: deployer.PeerImages{ + PeerInitImage: "ibp-init", + PeerInitTag: "2.5.1-2511004-amd64", + PeerImage: "ibp-peer", + PeerTag: "2.2.1-2511204-amd64", + DindImage: "ibp-dind", + DindTag: "noimages-amd64", + CouchDBImage: "ibp-couchdb", + CouchDBTag: "2.3.1-2511004-amd64", + GRPCWebImage: "ibp-grpcweb", + GRPCWebTag: "2.5.1-2511004-amd64", + FluentdImage: "ibp-fluentd", + FluentdTag: "2.5.1-2511004-amd64", + EnrollerImage: "ibp-enroller", + EnrollerTag: "1.0.0-amd64", + }, + }, + }, + Orderer: map[string]deployer.VersionOrderer{ + "1.4-9-0": { + Default: true, + Version: "1.4.9-0", + Image: deployer.OrdererImages{ + OrdererInitImage: "ibp-init", + OrdererInitTag: "2.5.1-2511004-amd64", + OrdererImage: "ibp-orderer", + OrdererTag: "1.4.9-2511004-amd64", + GRPCWebImage: "ibp-grpcweb", + GRPCWebTag: "2.5.1-2511004-amd64", + EnrollerImage: "ibp-enroller", + EnrollerTag: "1.0.0-amd64", + }, + }, + "2.2.1-0": { + Default: false, + Version: "2.2.1-0", + Image: deployer.OrdererImages{ + OrdererInitImage: "ibp-init", + OrdererInitTag: "2.5.1-2511004-amd64", + OrdererImage: "ibp-orderer", + OrdererTag: "2.2.1-2511004-amd64", + GRPCWebImage: "ibp-grpcweb", + GRPCWebTag: "2.5.1-2511004-amd64", + EnrollerImage: "ibp-enroller", + EnrollerTag: "1.0.0-amd64", + }, + }, + "2.2.1-1": { + 
Default: true, + Version: "2.2.1-0", + Image: deployer.OrdererImages{ + OrdererInitImage: "ibp-init", + OrdererInitTag: "2.5.1-2511004-amd64", + OrdererImage: "ibp-orderer", + OrdererTag: "2.2.1-2511204-amd64", + GRPCWebImage: "ibp-grpcweb", + GRPCWebTag: "2.5.1-2511004-amd64", + EnrollerImage: "ibp-enroller", + EnrollerTag: "1.0.0-amd64", + }, + }, + }, + }, + } + + fv = &images.FabricVersion{ + Versions: operatorCfg.Versions, + } + }) + + Context("ca", func() { + var ( + instance *current.IBPCA + ) + + Context("normalize version", func() { + When("using non-hyphenated fabric version", func() { + BeforeEach(func() { + instance = ¤t.IBPCA{ + Spec: current.IBPCASpec{ + FabricVersion: "1.4.9", + }, + } + }) + + It("returns default images for the base fabric version", func() { + version := fv.Normalize(instance) + Expect(version).To(Equal("1.4.9-1")) + }) + }) + }) + + Context("validate version", func() { + BeforeEach(func() { + instance = ¤t.IBPCA{ + Spec: current.IBPCASpec{ + FabricVersion: "1.8.9-1", + }, + } + }) + + It("returns error if unsupported version", func() { + err := fv.Validate(instance) + Expect(err).To(MatchError(ContainSubstring("is not supported for CA"))) + }) + }) + }) + + Context("peer", func() { + var ( + instance *current.IBPPeer + ) + + Context("normalize version", func() { + When("using non-hyphenated fabric version", func() { + BeforeEach(func() { + instance = ¤t.IBPPeer{ + Spec: current.IBPPeerSpec{ + FabricVersion: "2.2.1", + }, + } + }) + + It("returns default images for the base fabric version", func() { + version := fv.Normalize(instance) + Expect(version).To(Equal("2.2.1-1")) + }) + }) + }) + + Context("validate version", func() { + BeforeEach(func() { + instance = ¤t.IBPPeer{ + Spec: current.IBPPeerSpec{ + FabricVersion: "1.8.9-1", + }, + } + }) + + It("returns error if unsupported version", func() { + err := fv.Validate(instance) + Expect(err).To(MatchError(ContainSubstring("is not supported for Peer"))) + }) + }) + }) + + Context("orderer", func() { + var ( + instance *current.IBPOrderer + ) + + Context("normalize version", func() { + When("using non-hyphenated fabric version", func() { + BeforeEach(func() { + instance = ¤t.IBPOrderer{ + Spec: current.IBPOrdererSpec{ + FabricVersion: "2.2.1", + }, + } + }) + + It("returns default images for the base fabric version", func() { + version := fv.Normalize(instance) + Expect(version).To(Equal("2.2.1-1")) + }) + }) + }) + + Context("validate version", func() { + BeforeEach(func() { + instance = ¤t.IBPOrderer{ + Spec: current.IBPOrdererSpec{ + FabricVersion: "1.8.9-1", + }, + } + }) + + It("returns error if unsupported version", func() { + err := fv.Validate(instance) + Expect(err).To(MatchError(ContainSubstring("is not supported for Orderer"))) + }) + }) + }) +}) diff --git a/pkg/offering/common/reconcilechecks/images/images.go b/pkg/offering/common/reconcilechecks/images/images.go new file mode 100644 index 00000000..50808fc7 --- /dev/null +++ b/pkg/offering/common/reconcilechecks/images/images.go @@ -0,0 +1,214 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package images
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+
+	current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1"
+	"github.com/IBM-Blockchain/fabric-operator/pkg/apis/deployer"
+
+	logf "sigs.k8s.io/controller-runtime/pkg/log"
+)
+
+var log = logf.Log.WithName("image_checks")
+
+//go:generate counterfeiter -o mocks/instance.go -fake-name Instance . Instance
+
+// Instance is an instance of an IBP custom resource
+type Instance interface {
+	GetArch() []string
+	GetRegistryURL() string
+	GetFabricVersion() string
+	SetFabricVersion(string)
+	ImagesSet() bool
+}
+
+//go:generate counterfeiter -o mocks/update.go -fake-name Update . Update
+
+// Update defines update events we are interested in
+type Update interface {
+	ImagesUpdated() bool
+	FabricVersionUpdated() bool
+}
+
+// Image handles checks and defaults on versions of images
+type Image struct {
+	Versions           *deployer.Versions
+	DefaultRegistryURL string
+	DefaultArch        string
+}
+
+// SetDefaults sets defaults on instance based on fabric version
+func (i *Image) SetDefaults(instance Instance) error {
+	if !strings.Contains(instance.GetFabricVersion(), "-") {
+		return fmt.Errorf("fabric version format '%s' is not valid, must pass hyphenated version (e.g. 2.2.1-1)", instance.GetFabricVersion())
+	}
+
+	arch := i.DefaultArch
+	if len(instance.GetArch()) > 0 {
+		arch = instance.GetArch()[0]
+	}
+
+	registryURL := i.DefaultRegistryURL
+	if instance.GetRegistryURL() != "" {
+		registryURL = instance.GetRegistryURL()
+	}
+
+	// Add '/' at the end if not present in registry URL
+	if registryURL != "" && !strings.HasSuffix(registryURL, "/") {
+		registryURL = registryURL + "/"
+	}
+
+	switch instance.(type) {
+	case *current.IBPCA:
+		return setDefaultCAImages(instance.(*current.IBPCA), arch, registryURL, i.Versions.CA)
+	case *current.IBPPeer:
+		return setDefaultPeerImages(instance.(*current.IBPPeer), arch, registryURL, i.Versions.Peer)
+	case *current.IBPOrderer:
+		return setDefaultOrdererImages(instance.(*current.IBPOrderer), arch, registryURL, i.Versions.Orderer)
+	}
+
+	return nil
+}
+
+// UpdateRequired processes update events to determine if images need to be updated.
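+// The outcome, given the detected update events, is:
+//   - images updated (alone or together with a fabric version change): false, since
+//     explicitly provided images take precedence and are not overridden
+//   - only the fabric version updated: true, so defaults are re-resolved via SetDefaults
+//   - neither updated: false, nothing to do
+//
+// Illustrative sketch only (not part of this change); cfg, update and instance stand in
+// for the operator config, the detected update events and the CR being reconciled:
+//
+//	img := &Image{Versions: cfg.Versions, DefaultArch: "amd64"}
+//	if img.UpdateRequired(update) {
+//		if err := img.SetDefaults(instance); err != nil {
+//			return err
+//		}
+//	}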
+func (i *Image) UpdateRequired(update Update) bool { + if update.ImagesUpdated() { + return false + } + + // If neither fabric version nor images updated or both fabric version and images updated, return since no changes + // made or required + if !update.ImagesUpdated() && !update.FabricVersionUpdated() { + return false + } + + if update.FabricVersionUpdated() { + return true + } + + return false +} + +func normalizeFabricVersion(fabricVersion string, versions interface{}) string { + switch versions.(type) { + case map[string]deployer.VersionCA: + if !strings.Contains(fabricVersion, "-") { + for version, config := range versions.(map[string]deployer.VersionCA) { + if strings.HasPrefix(version, fabricVersion) && config.Default { + return version + } + } + } + case map[string]deployer.VersionPeer: + if !strings.Contains(fabricVersion, "-") { + for version, config := range versions.(map[string]deployer.VersionPeer) { + if strings.HasPrefix(version, fabricVersion) && config.Default { + return version + } + } + } + case map[string]deployer.VersionOrderer: + if !strings.Contains(fabricVersion, "-") { + for version, config := range versions.(map[string]deployer.VersionOrderer) { + if strings.HasPrefix(version, fabricVersion) && config.Default { + return version + } + } + } + } + + return fabricVersion +} + +func setDefaultCAImages(instance *current.IBPCA, arch, registryURL string, versions map[string]deployer.VersionCA) error { + fabricVersion := instance.Spec.FabricVersion + log.Info(fmt.Sprintf("Using default images for instance '%s' for fabric version '%s'", instance.GetName(), fabricVersion)) + + version, found := versions[fabricVersion] + if !found { + return fmt.Errorf("no default CA images defined for fabric version '%s'", fabricVersion) + } + + version.Image.Override(nil, registryURL, arch) + specVersions := ¤t.CAImages{} + versionBytes, err := json.Marshal(version.Image) + if err != nil { + return err + } + err = json.Unmarshal(versionBytes, specVersions) + if err != nil { + return err + } + instance.Spec.Images = specVersions + + return nil +} + +func setDefaultPeerImages(instance *current.IBPPeer, arch, registryURL string, versions map[string]deployer.VersionPeer) error { + fabricVersion := instance.Spec.FabricVersion + log.Info(fmt.Sprintf("Using default images for instance '%s' for fabric version '%s'", instance.GetName(), fabricVersion)) + + version, found := versions[fabricVersion] + if !found { + return fmt.Errorf("no default Peer images defined for fabric version '%s'", fabricVersion) + } + + version.Image.Override(nil, registryURL, arch) + specVersions := ¤t.PeerImages{} + versionBytes, err := json.Marshal(version.Image) + if err != nil { + return err + } + err = json.Unmarshal(versionBytes, specVersions) + if err != nil { + return err + } + instance.Spec.Images = specVersions + + return nil +} + +func setDefaultOrdererImages(instance *current.IBPOrderer, arch, registryURL string, versions map[string]deployer.VersionOrderer) error { + fabricVersion := instance.Spec.FabricVersion + log.Info(fmt.Sprintf("Using default images for instance '%s' for fabric version '%s'", instance.GetName(), fabricVersion)) + + version, found := versions[fabricVersion] + if !found { + return fmt.Errorf("no default Orderer images defined for fabric version '%s'", fabricVersion) + } + + version.Image.Override(nil, registryURL, arch) + + specVersions := ¤t.OrdererImages{} + versionBytes, err := json.Marshal(version.Image) + if err != nil { + return err + } + err = json.Unmarshal(versionBytes, 
specVersions) + if err != nil { + return err + } + instance.Spec.Images = specVersions + + return nil +} diff --git a/pkg/offering/common/reconcilechecks/images/images_suite_test.go b/pkg/offering/common/reconcilechecks/images/images_suite_test.go new file mode 100644 index 00000000..ab2967f4 --- /dev/null +++ b/pkg/offering/common/reconcilechecks/images/images_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package images_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestImages(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Images Suite") +} diff --git a/pkg/offering/common/reconcilechecks/images/images_test.go b/pkg/offering/common/reconcilechecks/images/images_test.go new file mode 100644 index 00000000..8ee1085f --- /dev/null +++ b/pkg/offering/common/reconcilechecks/images/images_test.go @@ -0,0 +1,571 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package images_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/deployer" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common/reconcilechecks/images" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common/reconcilechecks/images/mocks" +) + +var _ = Describe("default images", func() { + var ( + operatorCfg *config.Operator + ) + + BeforeEach(func() { + operatorCfg = &config.Operator{ + Versions: &deployer.Versions{ + CA: map[string]deployer.VersionCA{ + "1.4.9-0": { + Default: false, + Version: "1.4.9-0", + Image: deployer.CAImages{ + CAImage: "caimage", + CATag: "catag", + CAInitImage: "cainitimage", + CAInitTag: "cainittag", + EnrollerImage: "enrolleriamge", + EnrollerTag: "enrollertag", + }, + }, + "1.4.9-1": { + Default: true, + Version: "1.4.9-1", + Image: deployer.CAImages{ + CAImage: "caimage", + CATag: "newcatag", + CAInitImage: "cainitimage", + CAInitTag: "cainittag", + EnrollerImage: "enrolleriamge", + EnrollerTag: "enrollertag", + }, + }, + }, + Peer: map[string]deployer.VersionPeer{ + "1.4.9-0": { + Default: true, + Version: "1.4.9-0", + Image: deployer.PeerImages{ + PeerInitImage: "ibp-init", + PeerInitTag: "2.5.1-2511004-amd64", + PeerImage: "ibp-peer", + PeerTag: "1.4.9-2511004-amd64", + DindImage: "ibp-dind", + DindTag: "noimages-amd64", + CouchDBImage: "ibp-couchdb", + CouchDBTag: "2.3.1-2511004-amd64", + GRPCWebImage: "ibp-grpcweb", + GRPCWebTag: "2.5.1-2511004-amd64", + FluentdImage: "ibp-fluentd", + FluentdTag: "2.5.1-2511004-amd64", + EnrollerImage: "ibp-enroller", + EnrollerTag: "1.0.0-amd64", + }, + }, + "2.2.1-0": { + Default: false, + Version: "2.2.1-0", + Image: deployer.PeerImages{ + PeerInitImage: "ibp-init", + PeerInitTag: "2.5.1-2511004-amd64", + PeerImage: "ibp-peer", + PeerTag: "2.2.1-2511004-amd64", + DindImage: "ibp-dind", + DindTag: "noimages-amd64", + CouchDBImage: "ibp-couchdb", + CouchDBTag: "2.3.1-2511004-amd64", + GRPCWebImage: "ibp-grpcweb", + GRPCWebTag: "2.5.1-2511004-amd64", + FluentdImage: "ibp-fluentd", + FluentdTag: "2.5.1-2511004-amd64", + EnrollerImage: "ibp-enroller", + EnrollerTag: "1.0.0-amd64", + }, + }, + "2.2.1-1": { + Default: true, + Version: "2.2.1-1", + Image: deployer.PeerImages{ + PeerInitImage: "ibp-init", + PeerInitTag: "2.5.1-2511004-amd64", + PeerImage: "ibp-peer", + PeerTag: "2.2.1-2511204-amd64", + DindImage: "ibp-dind", + DindTag: "noimages-amd64", + CouchDBImage: "ibp-couchdb", + CouchDBTag: "2.3.1-2511004-amd64", + GRPCWebImage: "ibp-grpcweb", + GRPCWebTag: "2.5.1-2511004-amd64", + FluentdImage: "ibp-fluentd", + FluentdTag: "2.5.1-2511004-amd64", + EnrollerImage: "ibp-enroller", + EnrollerTag: "1.0.0-amd64", + }, + }, + }, + Orderer: map[string]deployer.VersionOrderer{ + "1.4-9-0": { + Default: true, + Version: "1.4.9-0", + Image: deployer.OrdererImages{ + OrdererInitImage: "ibp-init", + OrdererInitTag: "2.5.1-2511004-amd64", + OrdererImage: "ibp-orderer", + OrdererTag: "1.4.9-2511004-amd64", + GRPCWebImage: "ibp-grpcweb", + GRPCWebTag: "2.5.1-2511004-amd64", + EnrollerImage: "ibp-enroller", + EnrollerTag: "1.0.0-amd64", + }, + }, + "2.2.1-0": { + Default: false, + Version: "2.2.1-0", + Image: deployer.OrdererImages{ + OrdererInitImage: "ibp-init", + OrdererInitTag: "2.5.1-2511004-amd64", + OrdererImage: "ibp-orderer", + OrdererTag: "2.2.1-2511004-amd64", + GRPCWebImage: "ibp-grpcweb", + GRPCWebTag: "2.5.1-2511004-amd64", + EnrollerImage: 
"ibp-enroller", + EnrollerTag: "1.0.0-amd64", + }, + }, + "2.2.1-1": { + Default: true, + Version: "2.2.1-0", + Image: deployer.OrdererImages{ + OrdererInitImage: "ibp-init", + OrdererInitTag: "2.5.1-2511004-amd64", + OrdererImage: "ibp-orderer", + OrdererTag: "2.2.1-2511204-amd64", + GRPCWebImage: "ibp-grpcweb", + GRPCWebTag: "2.5.1-2511004-amd64", + EnrollerImage: "ibp-enroller", + EnrollerTag: "1.0.0-amd64", + }, + }, + }, + }, + } + }) + + Context("images", func() { + var image *images.Image + + BeforeEach(func() { + image = &images.Image{ + Versions: operatorCfg.Versions, + DefaultArch: "amd64", + // DefaultRegistryURL: "", + } + }) + + It("returns an error if fabric version is not in correct format", func() { + instance := &mocks.Instance{} + instance.GetFabricVersionReturns("1.4.9") + err := image.SetDefaults(instance) + Expect(err).To(MatchError("fabric version format '1.4.9' is not valid, must pass hyphenated version (e.g. 2.2.1-1)")) + }) + + Context("update required", func() { + var update *mocks.Update + + BeforeEach(func() { + update = &mocks.Update{} + }) + + It("returns false if images updated", func() { + update.ImagesUpdatedReturns(true) + required := image.UpdateRequired(update) + Expect(required).To(Equal(false)) + }) + + It("returns false if neither images nor fabric version updated", func() { + required := image.UpdateRequired(update) + Expect(required).To(Equal(false)) + }) + + It("returns true if fabric version updated and images not updated", func() { + update.FabricVersionUpdatedReturns(true) + required := image.UpdateRequired(update) + Expect(required).To(Equal(true)) + }) + }) + + Context("ca", func() { + var ( + instance *current.IBPCA + ) + + BeforeEach(func() { + instance = ¤t.IBPCA{ + Spec: current.IBPCASpec{ + RegistryURL: "ghcr.io/ibm-blockchain/", + FabricVersion: "1.4.9-1", + }, + } + }) + + Context("registry url", func() { + When("is not set", func() { + BeforeEach(func() { + instance.Spec.RegistryURL = "" + }) + + It("sets default images based on operator's config with registry of blank", func() { + err := image.SetDefaults(instance) + Expect(err).NotTo(HaveOccurred()) + caImages := deployer.CAImages{ + CAImage: "caimage", + CATag: "newcatag", + CAInitImage: "cainitimage", + CAInitTag: "cainittag", + EnrollerImage: "enrolleriamge", + EnrollerTag: "enrollertag", + } + Expect(instance.Spec.Images.CAImage).To(Equal(caImages.CAImage)) + Expect(instance.Spec.Images.CATag).To(Equal(caImages.CATag)) + Expect(instance.Spec.Images.CAInitImage).To(Equal(caImages.CAInitImage)) + Expect(instance.Spec.Images.CAInitTag).To(Equal(caImages.CAInitTag)) + Expect(instance.Spec.Images.EnrollerImage).To(Equal(caImages.EnrollerImage)) + Expect(instance.Spec.Images.EnrollerTag).To(Equal(caImages.EnrollerTag)) + }) + }) + + When("is set", func() { + It("sets default images based on operator's config", func() { + err := image.SetDefaults(instance) + Expect(err).NotTo(HaveOccurred()) + caImages := deployer.CAImages{ + CAImage: "ghcr.io/ibm-blockchain/caimage", + CATag: "newcatag", + CAInitImage: "ghcr.io/ibm-blockchain/cainitimage", + CAInitTag: "cainittag", + EnrollerImage: "ghcr.io/ibm-blockchain/enrolleriamge", + EnrollerTag: "enrollertag", + } + + Expect(instance.Spec.Images.CAImage).To(Equal(caImages.CAImage)) + Expect(instance.Spec.Images.CATag).To(Equal(caImages.CATag)) + Expect(instance.Spec.Images.CAInitImage).To(Equal(caImages.CAInitImage)) + Expect(instance.Spec.Images.CAInitTag).To(Equal(caImages.CAInitTag)) + 
Expect(instance.Spec.Images.EnrollerImage).To(Equal(caImages.EnrollerImage)) + Expect(instance.Spec.Images.EnrollerTag).To(Equal(caImages.EnrollerTag)) + }) + }) + }) + + When("using normalized fabric version", func() { + BeforeEach(func() { + instance.Spec.FabricVersion = "1.4.9-0" + }) + + It("returns default images for the base fabric version", func() { + err := image.SetDefaults(instance) + Expect(err).NotTo(HaveOccurred()) + + caImages := deployer.CAImages{ + CAImage: "ghcr.io/ibm-blockchain/caimage", + CATag: "catag", + CAInitImage: "ghcr.io/ibm-blockchain/cainitimage", + CAInitTag: "cainittag", + EnrollerImage: "ghcr.io/ibm-blockchain/enrolleriamge", + EnrollerTag: "enrollertag", + } + + Expect(instance.Spec.Images.CAImage).To(Equal(caImages.CAImage)) + Expect(instance.Spec.Images.CATag).To(Equal(caImages.CATag)) + Expect(instance.Spec.Images.CAInitImage).To(Equal(caImages.CAInitImage)) + Expect(instance.Spec.Images.CAInitTag).To(Equal(caImages.CAInitTag)) + Expect(instance.Spec.Images.EnrollerImage).To(Equal(caImages.EnrollerImage)) + Expect(instance.Spec.Images.EnrollerTag).To(Equal(caImages.EnrollerTag)) + }) + }) + + It("returns error if requested version not found", func() { + instance.Spec.FabricVersion = "5.1.0-1" + err := image.SetDefaults(instance) + Expect(err).To(HaveOccurred()) + }) + }) + + Context("peer", func() { + var ( + instance *current.IBPPeer + ) + + BeforeEach(func() { + instance = ¤t.IBPPeer{ + Spec: current.IBPPeerSpec{ + RegistryURL: "ghcr.io/ibm-blockchain/", + FabricVersion: "2.2.1-1", + }, + } + }) + + Context("registy URL", func() { + When("is not set", func() { + BeforeEach(func() { + instance.Spec.RegistryURL = "" + }) + + It("sets registry URL to blank", func() { + err := image.SetDefaults(instance) + Expect(err).NotTo(HaveOccurred()) + peerImages := deployer.PeerImages{ + PeerInitImage: "ibp-init", + PeerInitTag: "2.5.1-2511004-amd64", + PeerImage: "ibp-peer", + PeerTag: "2.2.1-2511204-amd64", + DindImage: "ibp-dind", + DindTag: "noimages-amd64", + CouchDBImage: "ibp-couchdb", + CouchDBTag: "2.3.1-2511004-amd64", + GRPCWebImage: "ibp-grpcweb", + GRPCWebTag: "2.5.1-2511004-amd64", + FluentdImage: "ibp-fluentd", + FluentdTag: "2.5.1-2511004-amd64", + EnrollerImage: "ibp-enroller", + EnrollerTag: "1.0.0-amd64", + } + + Expect(instance.Spec.Images.PeerInitImage).To(Equal(peerImages.PeerInitImage)) + Expect(instance.Spec.Images.PeerInitTag).To(Equal(peerImages.PeerInitTag)) + Expect(instance.Spec.Images.PeerImage).To(Equal(peerImages.PeerImage)) + Expect(instance.Spec.Images.PeerTag).To(Equal(peerImages.PeerTag)) + Expect(instance.Spec.Images.DindImage).To(Equal(peerImages.DindImage)) + Expect(instance.Spec.Images.DindTag).To(Equal(peerImages.DindTag)) + Expect(instance.Spec.Images.CouchDBImage).To(Equal(peerImages.CouchDBImage)) + Expect(instance.Spec.Images.CouchDBTag).To(Equal(peerImages.CouchDBTag)) + Expect(instance.Spec.Images.GRPCWebImage).To(Equal(peerImages.GRPCWebImage)) + Expect(instance.Spec.Images.GRPCWebTag).To(Equal(peerImages.GRPCWebTag)) + Expect(instance.Spec.Images.FluentdImage).To(Equal(peerImages.FluentdImage)) + Expect(instance.Spec.Images.FluentdTag).To(Equal(peerImages.FluentdTag)) + Expect(instance.Spec.Images.EnrollerImage).To(Equal(peerImages.EnrollerImage)) + Expect(instance.Spec.Images.EnrollerTag).To(Equal(peerImages.EnrollerTag)) + }) + }) + + When("is set", func() { + It("sets the requested registry URL", func() { + err := image.SetDefaults(instance) + Expect(err).NotTo(HaveOccurred()) + peerImages := deployer.PeerImages{ + 
PeerInitImage: "ghcr.io/ibm-blockchain/ibp-init", + PeerInitTag: "2.5.1-2511004-amd64", + PeerImage: "ghcr.io/ibm-blockchain/ibp-peer", + PeerTag: "2.2.1-2511204-amd64", + DindImage: "ghcr.io/ibm-blockchain/ibp-dind", + DindTag: "noimages-amd64", + CouchDBImage: "ghcr.io/ibm-blockchain/ibp-couchdb", + CouchDBTag: "2.3.1-2511004-amd64", + GRPCWebImage: "ghcr.io/ibm-blockchain/ibp-grpcweb", + GRPCWebTag: "2.5.1-2511004-amd64", + FluentdImage: "ghcr.io/ibm-blockchain/ibp-fluentd", + FluentdTag: "2.5.1-2511004-amd64", + EnrollerImage: "ghcr.io/ibm-blockchain/ibp-enroller", + EnrollerTag: "1.0.0-amd64", + } + + Expect(instance.Spec.Images.PeerInitImage).To(Equal(peerImages.PeerInitImage)) + Expect(instance.Spec.Images.PeerInitTag).To(Equal(peerImages.PeerInitTag)) + Expect(instance.Spec.Images.PeerImage).To(Equal(peerImages.PeerImage)) + Expect(instance.Spec.Images.PeerTag).To(Equal(peerImages.PeerTag)) + Expect(instance.Spec.Images.DindImage).To(Equal(peerImages.DindImage)) + Expect(instance.Spec.Images.DindTag).To(Equal(peerImages.DindTag)) + Expect(instance.Spec.Images.CouchDBImage).To(Equal(peerImages.CouchDBImage)) + Expect(instance.Spec.Images.CouchDBTag).To(Equal(peerImages.CouchDBTag)) + Expect(instance.Spec.Images.GRPCWebImage).To(Equal(peerImages.GRPCWebImage)) + Expect(instance.Spec.Images.GRPCWebTag).To(Equal(peerImages.GRPCWebTag)) + Expect(instance.Spec.Images.FluentdImage).To(Equal(peerImages.FluentdImage)) + Expect(instance.Spec.Images.FluentdTag).To(Equal(peerImages.FluentdTag)) + Expect(instance.Spec.Images.EnrollerImage).To(Equal(peerImages.EnrollerImage)) + Expect(instance.Spec.Images.EnrollerTag).To(Equal(peerImages.EnrollerTag)) + }) + }) + + }) + + When("using normalized fabric version", func() { + BeforeEach(func() { + instance.Spec.FabricVersion = "2.2.1-0" + }) + + It("returns images for the requested fabric version", func() { + err := image.SetDefaults(instance) + Expect(err).NotTo(HaveOccurred()) + peerImages := deployer.PeerImages{ + PeerInitImage: "ghcr.io/ibm-blockchain/ibp-init", + PeerInitTag: "2.5.1-2511004-amd64", + PeerImage: "ghcr.io/ibm-blockchain/ibp-peer", + PeerTag: "2.2.1-2511004-amd64", + DindImage: "ghcr.io/ibm-blockchain/ibp-dind", + DindTag: "noimages-amd64", + CouchDBImage: "ghcr.io/ibm-blockchain/ibp-couchdb", + CouchDBTag: "2.3.1-2511004-amd64", + GRPCWebImage: "ghcr.io/ibm-blockchain/ibp-grpcweb", + GRPCWebTag: "2.5.1-2511004-amd64", + FluentdImage: "ghcr.io/ibm-blockchain/ibp-fluentd", + FluentdTag: "2.5.1-2511004-amd64", + EnrollerImage: "ghcr.io/ibm-blockchain/ibp-enroller", + EnrollerTag: "1.0.0-amd64", + } + + Expect(instance.Spec.Images.PeerInitImage).To(Equal(peerImages.PeerInitImage)) + Expect(instance.Spec.Images.PeerInitTag).To(Equal(peerImages.PeerInitTag)) + Expect(instance.Spec.Images.PeerImage).To(Equal(peerImages.PeerImage)) + Expect(instance.Spec.Images.PeerTag).To(Equal(peerImages.PeerTag)) + Expect(instance.Spec.Images.DindImage).To(Equal(peerImages.DindImage)) + Expect(instance.Spec.Images.DindTag).To(Equal(peerImages.DindTag)) + Expect(instance.Spec.Images.CouchDBImage).To(Equal(peerImages.CouchDBImage)) + Expect(instance.Spec.Images.CouchDBTag).To(Equal(peerImages.CouchDBTag)) + Expect(instance.Spec.Images.GRPCWebImage).To(Equal(peerImages.GRPCWebImage)) + Expect(instance.Spec.Images.GRPCWebTag).To(Equal(peerImages.GRPCWebTag)) + Expect(instance.Spec.Images.FluentdImage).To(Equal(peerImages.FluentdImage)) + Expect(instance.Spec.Images.FluentdTag).To(Equal(peerImages.FluentdTag)) + 
Expect(instance.Spec.Images.EnrollerImage).To(Equal(peerImages.EnrollerImage)) + Expect(instance.Spec.Images.EnrollerTag).To(Equal(peerImages.EnrollerTag)) + }) + }) + + It("returns error if requested version not found", func() { + instance.Spec.FabricVersion = "5.1.0-1" + err := image.SetDefaults(instance) + Expect(err).To(HaveOccurred()) + }) + }) + + Context("orderer", func() { + var ( + instance *current.IBPOrderer + ) + + BeforeEach(func() { + instance = ¤t.IBPOrderer{ + Spec: current.IBPOrdererSpec{ + RegistryURL: "ghcr.io/ibm-blockchain/", + FabricVersion: "2.2.1-1", + }, + } + }) + + Context("registry URL", func() { + When("is not set", func() { + BeforeEach(func() { + instance.Spec.RegistryURL = "" + }) + + It("sets default images based on operator's config with registry of blank", func() { + err := image.SetDefaults(instance) + Expect(err).NotTo(HaveOccurred()) + ordererImages := deployer.OrdererImages{ + OrdererInitImage: "ibp-init", + OrdererInitTag: "2.5.1-2511004-amd64", + OrdererImage: "ibp-orderer", + OrdererTag: "2.2.1-2511204-amd64", + GRPCWebImage: "ibp-grpcweb", + GRPCWebTag: "2.5.1-2511004-amd64", + EnrollerImage: "ibp-enroller", + EnrollerTag: "1.0.0-amd64", + } + + Expect(instance.Spec.Images.OrdererInitImage).To(Equal(ordererImages.OrdererInitImage)) + Expect(instance.Spec.Images.OrdererInitTag).To(Equal(ordererImages.OrdererInitTag)) + Expect(instance.Spec.Images.OrdererImage).To(Equal(ordererImages.OrdererImage)) + Expect(instance.Spec.Images.OrdererTag).To(Equal(ordererImages.OrdererTag)) + Expect(instance.Spec.Images.GRPCWebImage).To(Equal(ordererImages.GRPCWebImage)) + Expect(instance.Spec.Images.GRPCWebTag).To(Equal(ordererImages.GRPCWebTag)) + Expect(instance.Spec.Images.EnrollerImage).To(Equal(ordererImages.EnrollerImage)) + Expect(instance.Spec.Images.EnrollerTag).To(Equal(ordererImages.EnrollerTag)) + }) + }) + + When("is set", func() { + It("sets default images based on operator's config", func() { + err := image.SetDefaults(instance) + Expect(err).NotTo(HaveOccurred()) + ordererImages := deployer.OrdererImages{ + OrdererInitImage: "ghcr.io/ibm-blockchain/ibp-init", + OrdererInitTag: "2.5.1-2511004-amd64", + OrdererImage: "ghcr.io/ibm-blockchain/ibp-orderer", + OrdererTag: "2.2.1-2511204-amd64", + GRPCWebImage: "ghcr.io/ibm-blockchain/ibp-grpcweb", + GRPCWebTag: "2.5.1-2511004-amd64", + EnrollerImage: "ghcr.io/ibm-blockchain/ibp-enroller", + EnrollerTag: "1.0.0-amd64", + } + + Expect(instance.Spec.Images.OrdererInitImage).To(Equal(ordererImages.OrdererInitImage)) + Expect(instance.Spec.Images.OrdererInitTag).To(Equal(ordererImages.OrdererInitTag)) + Expect(instance.Spec.Images.OrdererImage).To(Equal(ordererImages.OrdererImage)) + Expect(instance.Spec.Images.OrdererTag).To(Equal(ordererImages.OrdererTag)) + Expect(instance.Spec.Images.GRPCWebImage).To(Equal(ordererImages.GRPCWebImage)) + Expect(instance.Spec.Images.GRPCWebTag).To(Equal(ordererImages.GRPCWebTag)) + Expect(instance.Spec.Images.EnrollerImage).To(Equal(ordererImages.EnrollerImage)) + Expect(instance.Spec.Images.EnrollerTag).To(Equal(ordererImages.EnrollerTag)) + }) + }) + }) + + When("using normalized fabric version", func() { + BeforeEach(func() { + instance.Spec.FabricVersion = "2.2.1-0" + }) + + It("returns default images for the base fabric version", func() { + err := image.SetDefaults(instance) + Expect(err).NotTo(HaveOccurred()) + ordererImages := deployer.OrdererImages{ + OrdererInitImage: "ghcr.io/ibm-blockchain/ibp-init", + OrdererInitTag: "2.5.1-2511004-amd64", + OrdererImage: 
"ghcr.io/ibm-blockchain/ibp-orderer", + OrdererTag: "2.2.1-2511004-amd64", + GRPCWebImage: "ghcr.io/ibm-blockchain/ibp-grpcweb", + GRPCWebTag: "2.5.1-2511004-amd64", + EnrollerImage: "ghcr.io/ibm-blockchain/ibp-enroller", + EnrollerTag: "1.0.0-amd64", + } + + Expect(instance.Spec.Images.OrdererInitImage).To(Equal(ordererImages.OrdererInitImage)) + Expect(instance.Spec.Images.OrdererInitTag).To(Equal(ordererImages.OrdererInitTag)) + Expect(instance.Spec.Images.OrdererImage).To(Equal(ordererImages.OrdererImage)) + Expect(instance.Spec.Images.OrdererTag).To(Equal(ordererImages.OrdererTag)) + Expect(instance.Spec.Images.GRPCWebImage).To(Equal(ordererImages.GRPCWebImage)) + Expect(instance.Spec.Images.GRPCWebTag).To(Equal(ordererImages.GRPCWebTag)) + Expect(instance.Spec.Images.EnrollerImage).To(Equal(ordererImages.EnrollerImage)) + Expect(instance.Spec.Images.EnrollerTag).To(Equal(ordererImages.EnrollerTag)) + }) + }) + + It("returns error if requested version not found", func() { + instance.Spec.FabricVersion = "5.1.0-1" + err := image.SetDefaults(instance) + Expect(err).To(HaveOccurred()) + }) + }) + }) +}) diff --git a/pkg/offering/common/reconcilechecks/images/mocks/fabricversion.go b/pkg/offering/common/reconcilechecks/images/mocks/fabricversion.go new file mode 100644 index 00000000..f2a31ca0 --- /dev/null +++ b/pkg/offering/common/reconcilechecks/images/mocks/fabricversion.go @@ -0,0 +1,102 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package mocks + +import ( + "sync" + + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common/reconcilechecks/images" +) + +type FabricVersionInstance struct { + GetFabricVersionStub func() string + getFabricVersionMutex sync.RWMutex + getFabricVersionArgsForCall []struct { + } + getFabricVersionReturns struct { + result1 string + } + getFabricVersionReturnsOnCall map[int]struct { + result1 string + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *FabricVersionInstance) GetFabricVersion() string { + fake.getFabricVersionMutex.Lock() + ret, specificReturn := fake.getFabricVersionReturnsOnCall[len(fake.getFabricVersionArgsForCall)] + fake.getFabricVersionArgsForCall = append(fake.getFabricVersionArgsForCall, struct { + }{}) + stub := fake.GetFabricVersionStub + fakeReturns := fake.getFabricVersionReturns + fake.recordInvocation("GetFabricVersion", []interface{}{}) + fake.getFabricVersionMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FabricVersionInstance) GetFabricVersionCallCount() int { + fake.getFabricVersionMutex.RLock() + defer fake.getFabricVersionMutex.RUnlock() + return len(fake.getFabricVersionArgsForCall) +} + +func (fake *FabricVersionInstance) GetFabricVersionCalls(stub func() string) { + fake.getFabricVersionMutex.Lock() + defer fake.getFabricVersionMutex.Unlock() + fake.GetFabricVersionStub = stub +} + +func (fake *FabricVersionInstance) GetFabricVersionReturns(result1 string) { + fake.getFabricVersionMutex.Lock() + defer fake.getFabricVersionMutex.Unlock() + fake.GetFabricVersionStub = nil + fake.getFabricVersionReturns = struct { + result1 string + }{result1} +} + +func (fake *FabricVersionInstance) GetFabricVersionReturnsOnCall(i int, result1 string) { + fake.getFabricVersionMutex.Lock() + defer fake.getFabricVersionMutex.Unlock() + fake.GetFabricVersionStub = nil + if fake.getFabricVersionReturnsOnCall == nil { + fake.getFabricVersionReturnsOnCall = make(map[int]struct { + result1 
string + }) + } + fake.getFabricVersionReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *FabricVersionInstance) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.getFabricVersionMutex.RLock() + defer fake.getFabricVersionMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *FabricVersionInstance) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ images.FabricVersionInstance = new(FabricVersionInstance) diff --git a/pkg/offering/common/reconcilechecks/images/mocks/instance.go b/pkg/offering/common/reconcilechecks/images/mocks/instance.go new file mode 100644 index 00000000..c8a50258 --- /dev/null +++ b/pkg/offering/common/reconcilechecks/images/mocks/instance.go @@ -0,0 +1,336 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package mocks + +import ( + "sync" + + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common/reconcilechecks/images" +) + +type Instance struct { + GetArchStub func() []string + getArchMutex sync.RWMutex + getArchArgsForCall []struct { + } + getArchReturns struct { + result1 []string + } + getArchReturnsOnCall map[int]struct { + result1 []string + } + GetFabricVersionStub func() string + getFabricVersionMutex sync.RWMutex + getFabricVersionArgsForCall []struct { + } + getFabricVersionReturns struct { + result1 string + } + getFabricVersionReturnsOnCall map[int]struct { + result1 string + } + GetRegistryURLStub func() string + getRegistryURLMutex sync.RWMutex + getRegistryURLArgsForCall []struct { + } + getRegistryURLReturns struct { + result1 string + } + getRegistryURLReturnsOnCall map[int]struct { + result1 string + } + ImagesSetStub func() bool + imagesSetMutex sync.RWMutex + imagesSetArgsForCall []struct { + } + imagesSetReturns struct { + result1 bool + } + imagesSetReturnsOnCall map[int]struct { + result1 bool + } + SetFabricVersionStub func(string) + setFabricVersionMutex sync.RWMutex + setFabricVersionArgsForCall []struct { + arg1 string + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *Instance) GetArch() []string { + fake.getArchMutex.Lock() + ret, specificReturn := fake.getArchReturnsOnCall[len(fake.getArchArgsForCall)] + fake.getArchArgsForCall = append(fake.getArchArgsForCall, struct { + }{}) + stub := fake.GetArchStub + fakeReturns := fake.getArchReturns + fake.recordInvocation("GetArch", []interface{}{}) + fake.getArchMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetArchCallCount() int { + fake.getArchMutex.RLock() + defer fake.getArchMutex.RUnlock() + return len(fake.getArchArgsForCall) +} + +func (fake *Instance) GetArchCalls(stub func() []string) { + fake.getArchMutex.Lock() + defer fake.getArchMutex.Unlock() + fake.GetArchStub = stub +} + +func (fake *Instance) GetArchReturns(result1 []string) { + fake.getArchMutex.Lock() + defer fake.getArchMutex.Unlock() + fake.GetArchStub = nil + fake.getArchReturns = struct { + result1 []string + 
}{result1} +} + +func (fake *Instance) GetArchReturnsOnCall(i int, result1 []string) { + fake.getArchMutex.Lock() + defer fake.getArchMutex.Unlock() + fake.GetArchStub = nil + if fake.getArchReturnsOnCall == nil { + fake.getArchReturnsOnCall = make(map[int]struct { + result1 []string + }) + } + fake.getArchReturnsOnCall[i] = struct { + result1 []string + }{result1} +} + +func (fake *Instance) GetFabricVersion() string { + fake.getFabricVersionMutex.Lock() + ret, specificReturn := fake.getFabricVersionReturnsOnCall[len(fake.getFabricVersionArgsForCall)] + fake.getFabricVersionArgsForCall = append(fake.getFabricVersionArgsForCall, struct { + }{}) + stub := fake.GetFabricVersionStub + fakeReturns := fake.getFabricVersionReturns + fake.recordInvocation("GetFabricVersion", []interface{}{}) + fake.getFabricVersionMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetFabricVersionCallCount() int { + fake.getFabricVersionMutex.RLock() + defer fake.getFabricVersionMutex.RUnlock() + return len(fake.getFabricVersionArgsForCall) +} + +func (fake *Instance) GetFabricVersionCalls(stub func() string) { + fake.getFabricVersionMutex.Lock() + defer fake.getFabricVersionMutex.Unlock() + fake.GetFabricVersionStub = stub +} + +func (fake *Instance) GetFabricVersionReturns(result1 string) { + fake.getFabricVersionMutex.Lock() + defer fake.getFabricVersionMutex.Unlock() + fake.GetFabricVersionStub = nil + fake.getFabricVersionReturns = struct { + result1 string + }{result1} +} + +func (fake *Instance) GetFabricVersionReturnsOnCall(i int, result1 string) { + fake.getFabricVersionMutex.Lock() + defer fake.getFabricVersionMutex.Unlock() + fake.GetFabricVersionStub = nil + if fake.getFabricVersionReturnsOnCall == nil { + fake.getFabricVersionReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getFabricVersionReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *Instance) GetRegistryURL() string { + fake.getRegistryURLMutex.Lock() + ret, specificReturn := fake.getRegistryURLReturnsOnCall[len(fake.getRegistryURLArgsForCall)] + fake.getRegistryURLArgsForCall = append(fake.getRegistryURLArgsForCall, struct { + }{}) + stub := fake.GetRegistryURLStub + fakeReturns := fake.getRegistryURLReturns + fake.recordInvocation("GetRegistryURL", []interface{}{}) + fake.getRegistryURLMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetRegistryURLCallCount() int { + fake.getRegistryURLMutex.RLock() + defer fake.getRegistryURLMutex.RUnlock() + return len(fake.getRegistryURLArgsForCall) +} + +func (fake *Instance) GetRegistryURLCalls(stub func() string) { + fake.getRegistryURLMutex.Lock() + defer fake.getRegistryURLMutex.Unlock() + fake.GetRegistryURLStub = stub +} + +func (fake *Instance) GetRegistryURLReturns(result1 string) { + fake.getRegistryURLMutex.Lock() + defer fake.getRegistryURLMutex.Unlock() + fake.GetRegistryURLStub = nil + fake.getRegistryURLReturns = struct { + result1 string + }{result1} +} + +func (fake *Instance) GetRegistryURLReturnsOnCall(i int, result1 string) { + fake.getRegistryURLMutex.Lock() + defer fake.getRegistryURLMutex.Unlock() + fake.GetRegistryURLStub = nil + if fake.getRegistryURLReturnsOnCall == nil { + fake.getRegistryURLReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getRegistryURLReturnsOnCall[i] = struct { + result1 string + 
}{result1} +} + +func (fake *Instance) ImagesSet() bool { + fake.imagesSetMutex.Lock() + ret, specificReturn := fake.imagesSetReturnsOnCall[len(fake.imagesSetArgsForCall)] + fake.imagesSetArgsForCall = append(fake.imagesSetArgsForCall, struct { + }{}) + stub := fake.ImagesSetStub + fakeReturns := fake.imagesSetReturns + fake.recordInvocation("ImagesSet", []interface{}{}) + fake.imagesSetMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) ImagesSetCallCount() int { + fake.imagesSetMutex.RLock() + defer fake.imagesSetMutex.RUnlock() + return len(fake.imagesSetArgsForCall) +} + +func (fake *Instance) ImagesSetCalls(stub func() bool) { + fake.imagesSetMutex.Lock() + defer fake.imagesSetMutex.Unlock() + fake.ImagesSetStub = stub +} + +func (fake *Instance) ImagesSetReturns(result1 bool) { + fake.imagesSetMutex.Lock() + defer fake.imagesSetMutex.Unlock() + fake.ImagesSetStub = nil + fake.imagesSetReturns = struct { + result1 bool + }{result1} +} + +func (fake *Instance) ImagesSetReturnsOnCall(i int, result1 bool) { + fake.imagesSetMutex.Lock() + defer fake.imagesSetMutex.Unlock() + fake.ImagesSetStub = nil + if fake.imagesSetReturnsOnCall == nil { + fake.imagesSetReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.imagesSetReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Instance) SetFabricVersion(arg1 string) { + fake.setFabricVersionMutex.Lock() + fake.setFabricVersionArgsForCall = append(fake.setFabricVersionArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetFabricVersionStub + fake.recordInvocation("SetFabricVersion", []interface{}{arg1}) + fake.setFabricVersionMutex.Unlock() + if stub != nil { + fake.SetFabricVersionStub(arg1) + } +} + +func (fake *Instance) SetFabricVersionCallCount() int { + fake.setFabricVersionMutex.RLock() + defer fake.setFabricVersionMutex.RUnlock() + return len(fake.setFabricVersionArgsForCall) +} + +func (fake *Instance) SetFabricVersionCalls(stub func(string)) { + fake.setFabricVersionMutex.Lock() + defer fake.setFabricVersionMutex.Unlock() + fake.SetFabricVersionStub = stub +} + +func (fake *Instance) SetFabricVersionArgsForCall(i int) string { + fake.setFabricVersionMutex.RLock() + defer fake.setFabricVersionMutex.RUnlock() + argsForCall := fake.setFabricVersionArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Instance) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.getArchMutex.RLock() + defer fake.getArchMutex.RUnlock() + fake.getFabricVersionMutex.RLock() + defer fake.getFabricVersionMutex.RUnlock() + fake.getRegistryURLMutex.RLock() + defer fake.getRegistryURLMutex.RUnlock() + fake.imagesSetMutex.RLock() + defer fake.imagesSetMutex.RUnlock() + fake.setFabricVersionMutex.RLock() + defer fake.setFabricVersionMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *Instance) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ images.Instance = new(Instance) diff --git 
a/pkg/offering/common/reconcilechecks/images/mocks/update.go b/pkg/offering/common/reconcilechecks/images/mocks/update.go new file mode 100644 index 00000000..33890af9 --- /dev/null +++ b/pkg/offering/common/reconcilechecks/images/mocks/update.go @@ -0,0 +1,167 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package mocks + +import ( + "sync" + + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common/reconcilechecks/images" +) + +type Update struct { + FabricVersionUpdatedStub func() bool + fabricVersionUpdatedMutex sync.RWMutex + fabricVersionUpdatedArgsForCall []struct { + } + fabricVersionUpdatedReturns struct { + result1 bool + } + fabricVersionUpdatedReturnsOnCall map[int]struct { + result1 bool + } + ImagesUpdatedStub func() bool + imagesUpdatedMutex sync.RWMutex + imagesUpdatedArgsForCall []struct { + } + imagesUpdatedReturns struct { + result1 bool + } + imagesUpdatedReturnsOnCall map[int]struct { + result1 bool + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *Update) FabricVersionUpdated() bool { + fake.fabricVersionUpdatedMutex.Lock() + ret, specificReturn := fake.fabricVersionUpdatedReturnsOnCall[len(fake.fabricVersionUpdatedArgsForCall)] + fake.fabricVersionUpdatedArgsForCall = append(fake.fabricVersionUpdatedArgsForCall, struct { + }{}) + stub := fake.FabricVersionUpdatedStub + fakeReturns := fake.fabricVersionUpdatedReturns + fake.recordInvocation("FabricVersionUpdated", []interface{}{}) + fake.fabricVersionUpdatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) FabricVersionUpdatedCallCount() int { + fake.fabricVersionUpdatedMutex.RLock() + defer fake.fabricVersionUpdatedMutex.RUnlock() + return len(fake.fabricVersionUpdatedArgsForCall) +} + +func (fake *Update) FabricVersionUpdatedCalls(stub func() bool) { + fake.fabricVersionUpdatedMutex.Lock() + defer fake.fabricVersionUpdatedMutex.Unlock() + fake.FabricVersionUpdatedStub = stub +} + +func (fake *Update) FabricVersionUpdatedReturns(result1 bool) { + fake.fabricVersionUpdatedMutex.Lock() + defer fake.fabricVersionUpdatedMutex.Unlock() + fake.FabricVersionUpdatedStub = nil + fake.fabricVersionUpdatedReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) FabricVersionUpdatedReturnsOnCall(i int, result1 bool) { + fake.fabricVersionUpdatedMutex.Lock() + defer fake.fabricVersionUpdatedMutex.Unlock() + fake.FabricVersionUpdatedStub = nil + if fake.fabricVersionUpdatedReturnsOnCall == nil { + fake.fabricVersionUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.fabricVersionUpdatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) ImagesUpdated() bool { + fake.imagesUpdatedMutex.Lock() + ret, specificReturn := fake.imagesUpdatedReturnsOnCall[len(fake.imagesUpdatedArgsForCall)] + fake.imagesUpdatedArgsForCall = append(fake.imagesUpdatedArgsForCall, struct { + }{}) + stub := fake.ImagesUpdatedStub + fakeReturns := fake.imagesUpdatedReturns + fake.recordInvocation("ImagesUpdated", []interface{}{}) + fake.imagesUpdatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) ImagesUpdatedCallCount() int { + fake.imagesUpdatedMutex.RLock() + defer fake.imagesUpdatedMutex.RUnlock() + return len(fake.imagesUpdatedArgsForCall) +} + +func (fake *Update) ImagesUpdatedCalls(stub func() bool) { + 
fake.imagesUpdatedMutex.Lock() + defer fake.imagesUpdatedMutex.Unlock() + fake.ImagesUpdatedStub = stub +} + +func (fake *Update) ImagesUpdatedReturns(result1 bool) { + fake.imagesUpdatedMutex.Lock() + defer fake.imagesUpdatedMutex.Unlock() + fake.ImagesUpdatedStub = nil + fake.imagesUpdatedReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) ImagesUpdatedReturnsOnCall(i int, result1 bool) { + fake.imagesUpdatedMutex.Lock() + defer fake.imagesUpdatedMutex.Unlock() + fake.ImagesUpdatedStub = nil + if fake.imagesUpdatedReturnsOnCall == nil { + fake.imagesUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.imagesUpdatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.fabricVersionUpdatedMutex.RLock() + defer fake.fabricVersionUpdatedMutex.RUnlock() + fake.imagesUpdatedMutex.RLock() + defer fake.imagesUpdatedMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *Update) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ images.Update = new(Update) diff --git a/pkg/offering/common/reconcilechecks/mocks/image.go b/pkg/offering/common/reconcilechecks/mocks/image.go new file mode 100644 index 00000000..6138861e --- /dev/null +++ b/pkg/offering/common/reconcilechecks/mocks/image.go @@ -0,0 +1,186 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package mocks + +import ( + "sync" + + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common/reconcilechecks" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common/reconcilechecks/images" +) + +type Image struct { + SetDefaultsStub func(images.Instance) error + setDefaultsMutex sync.RWMutex + setDefaultsArgsForCall []struct { + arg1 images.Instance + } + setDefaultsReturns struct { + result1 error + } + setDefaultsReturnsOnCall map[int]struct { + result1 error + } + UpdateRequiredStub func(images.Update) bool + updateRequiredMutex sync.RWMutex + updateRequiredArgsForCall []struct { + arg1 images.Update + } + updateRequiredReturns struct { + result1 bool + } + updateRequiredReturnsOnCall map[int]struct { + result1 bool + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *Image) SetDefaults(arg1 images.Instance) error { + fake.setDefaultsMutex.Lock() + ret, specificReturn := fake.setDefaultsReturnsOnCall[len(fake.setDefaultsArgsForCall)] + fake.setDefaultsArgsForCall = append(fake.setDefaultsArgsForCall, struct { + arg1 images.Instance + }{arg1}) + stub := fake.SetDefaultsStub + fakeReturns := fake.setDefaultsReturns + fake.recordInvocation("SetDefaults", []interface{}{arg1}) + fake.setDefaultsMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Image) SetDefaultsCallCount() int { + fake.setDefaultsMutex.RLock() + defer fake.setDefaultsMutex.RUnlock() + return len(fake.setDefaultsArgsForCall) +} + +func (fake *Image) SetDefaultsCalls(stub func(images.Instance) error) { + fake.setDefaultsMutex.Lock() + defer fake.setDefaultsMutex.Unlock() + fake.SetDefaultsStub = stub +} + +func (fake *Image) SetDefaultsArgsForCall(i int) images.Instance { + fake.setDefaultsMutex.RLock() + defer fake.setDefaultsMutex.RUnlock() + argsForCall := fake.setDefaultsArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Image) SetDefaultsReturns(result1 error) { + fake.setDefaultsMutex.Lock() + defer fake.setDefaultsMutex.Unlock() + fake.SetDefaultsStub = nil + fake.setDefaultsReturns = struct { + result1 error + }{result1} +} + +func (fake *Image) SetDefaultsReturnsOnCall(i int, result1 error) { + fake.setDefaultsMutex.Lock() + defer fake.setDefaultsMutex.Unlock() + fake.SetDefaultsStub = nil + if fake.setDefaultsReturnsOnCall == nil { + fake.setDefaultsReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.setDefaultsReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Image) UpdateRequired(arg1 images.Update) bool { + fake.updateRequiredMutex.Lock() + ret, specificReturn := fake.updateRequiredReturnsOnCall[len(fake.updateRequiredArgsForCall)] + fake.updateRequiredArgsForCall = append(fake.updateRequiredArgsForCall, struct { + arg1 images.Update + }{arg1}) + stub := fake.UpdateRequiredStub + fakeReturns := fake.updateRequiredReturns + fake.recordInvocation("UpdateRequired", []interface{}{arg1}) + fake.updateRequiredMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Image) UpdateRequiredCallCount() int { + fake.updateRequiredMutex.RLock() + defer fake.updateRequiredMutex.RUnlock() + return len(fake.updateRequiredArgsForCall) +} + +func (fake *Image) UpdateRequiredCalls(stub func(images.Update) bool) { + fake.updateRequiredMutex.Lock() + defer fake.updateRequiredMutex.Unlock() + fake.UpdateRequiredStub = stub +} + 
+func (fake *Image) UpdateRequiredArgsForCall(i int) images.Update { + fake.updateRequiredMutex.RLock() + defer fake.updateRequiredMutex.RUnlock() + argsForCall := fake.updateRequiredArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Image) UpdateRequiredReturns(result1 bool) { + fake.updateRequiredMutex.Lock() + defer fake.updateRequiredMutex.Unlock() + fake.UpdateRequiredStub = nil + fake.updateRequiredReturns = struct { + result1 bool + }{result1} +} + +func (fake *Image) UpdateRequiredReturnsOnCall(i int, result1 bool) { + fake.updateRequiredMutex.Lock() + defer fake.updateRequiredMutex.Unlock() + fake.UpdateRequiredStub = nil + if fake.updateRequiredReturnsOnCall == nil { + fake.updateRequiredReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.updateRequiredReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Image) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.setDefaultsMutex.RLock() + defer fake.setDefaultsMutex.RUnlock() + fake.updateRequiredMutex.RLock() + defer fake.updateRequiredMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *Image) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ reconcilechecks.Image = new(Image) diff --git a/pkg/offering/common/reconcilechecks/mocks/instance.go b/pkg/offering/common/reconcilechecks/mocks/instance.go new file mode 100644 index 00000000..48fd8258 --- /dev/null +++ b/pkg/offering/common/reconcilechecks/mocks/instance.go @@ -0,0 +1,336 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package mocks + +import ( + "sync" + + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common/reconcilechecks" +) + +type Instance struct { + GetArchStub func() []string + getArchMutex sync.RWMutex + getArchArgsForCall []struct { + } + getArchReturns struct { + result1 []string + } + getArchReturnsOnCall map[int]struct { + result1 []string + } + GetFabricVersionStub func() string + getFabricVersionMutex sync.RWMutex + getFabricVersionArgsForCall []struct { + } + getFabricVersionReturns struct { + result1 string + } + getFabricVersionReturnsOnCall map[int]struct { + result1 string + } + GetRegistryURLStub func() string + getRegistryURLMutex sync.RWMutex + getRegistryURLArgsForCall []struct { + } + getRegistryURLReturns struct { + result1 string + } + getRegistryURLReturnsOnCall map[int]struct { + result1 string + } + ImagesSetStub func() bool + imagesSetMutex sync.RWMutex + imagesSetArgsForCall []struct { + } + imagesSetReturns struct { + result1 bool + } + imagesSetReturnsOnCall map[int]struct { + result1 bool + } + SetFabricVersionStub func(string) + setFabricVersionMutex sync.RWMutex + setFabricVersionArgsForCall []struct { + arg1 string + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *Instance) GetArch() []string { + fake.getArchMutex.Lock() + ret, specificReturn := fake.getArchReturnsOnCall[len(fake.getArchArgsForCall)] + fake.getArchArgsForCall = append(fake.getArchArgsForCall, struct { + }{}) + stub := fake.GetArchStub + fakeReturns := fake.getArchReturns + fake.recordInvocation("GetArch", []interface{}{}) + fake.getArchMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetArchCallCount() int { + fake.getArchMutex.RLock() + defer fake.getArchMutex.RUnlock() + return len(fake.getArchArgsForCall) +} + +func (fake *Instance) GetArchCalls(stub func() []string) { + fake.getArchMutex.Lock() + defer fake.getArchMutex.Unlock() + fake.GetArchStub = stub +} + +func (fake *Instance) GetArchReturns(result1 []string) { + fake.getArchMutex.Lock() + defer fake.getArchMutex.Unlock() + fake.GetArchStub = nil + fake.getArchReturns = struct { + result1 []string + }{result1} +} + +func (fake *Instance) GetArchReturnsOnCall(i int, result1 []string) { + fake.getArchMutex.Lock() + defer fake.getArchMutex.Unlock() + fake.GetArchStub = nil + if fake.getArchReturnsOnCall == nil { + fake.getArchReturnsOnCall = make(map[int]struct { + result1 []string + }) + } + fake.getArchReturnsOnCall[i] = struct { + result1 []string + }{result1} +} + +func (fake *Instance) GetFabricVersion() string { + fake.getFabricVersionMutex.Lock() + ret, specificReturn := fake.getFabricVersionReturnsOnCall[len(fake.getFabricVersionArgsForCall)] + fake.getFabricVersionArgsForCall = append(fake.getFabricVersionArgsForCall, struct { + }{}) + stub := fake.GetFabricVersionStub + fakeReturns := fake.getFabricVersionReturns + fake.recordInvocation("GetFabricVersion", []interface{}{}) + fake.getFabricVersionMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetFabricVersionCallCount() int { + fake.getFabricVersionMutex.RLock() + defer fake.getFabricVersionMutex.RUnlock() + return len(fake.getFabricVersionArgsForCall) +} + +func (fake *Instance) GetFabricVersionCalls(stub func() string) { + fake.getFabricVersionMutex.Lock() + defer fake.getFabricVersionMutex.Unlock() + 
fake.GetFabricVersionStub = stub +} + +func (fake *Instance) GetFabricVersionReturns(result1 string) { + fake.getFabricVersionMutex.Lock() + defer fake.getFabricVersionMutex.Unlock() + fake.GetFabricVersionStub = nil + fake.getFabricVersionReturns = struct { + result1 string + }{result1} +} + +func (fake *Instance) GetFabricVersionReturnsOnCall(i int, result1 string) { + fake.getFabricVersionMutex.Lock() + defer fake.getFabricVersionMutex.Unlock() + fake.GetFabricVersionStub = nil + if fake.getFabricVersionReturnsOnCall == nil { + fake.getFabricVersionReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getFabricVersionReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *Instance) GetRegistryURL() string { + fake.getRegistryURLMutex.Lock() + ret, specificReturn := fake.getRegistryURLReturnsOnCall[len(fake.getRegistryURLArgsForCall)] + fake.getRegistryURLArgsForCall = append(fake.getRegistryURLArgsForCall, struct { + }{}) + stub := fake.GetRegistryURLStub + fakeReturns := fake.getRegistryURLReturns + fake.recordInvocation("GetRegistryURL", []interface{}{}) + fake.getRegistryURLMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) GetRegistryURLCallCount() int { + fake.getRegistryURLMutex.RLock() + defer fake.getRegistryURLMutex.RUnlock() + return len(fake.getRegistryURLArgsForCall) +} + +func (fake *Instance) GetRegistryURLCalls(stub func() string) { + fake.getRegistryURLMutex.Lock() + defer fake.getRegistryURLMutex.Unlock() + fake.GetRegistryURLStub = stub +} + +func (fake *Instance) GetRegistryURLReturns(result1 string) { + fake.getRegistryURLMutex.Lock() + defer fake.getRegistryURLMutex.Unlock() + fake.GetRegistryURLStub = nil + fake.getRegistryURLReturns = struct { + result1 string + }{result1} +} + +func (fake *Instance) GetRegistryURLReturnsOnCall(i int, result1 string) { + fake.getRegistryURLMutex.Lock() + defer fake.getRegistryURLMutex.Unlock() + fake.GetRegistryURLStub = nil + if fake.getRegistryURLReturnsOnCall == nil { + fake.getRegistryURLReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getRegistryURLReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *Instance) ImagesSet() bool { + fake.imagesSetMutex.Lock() + ret, specificReturn := fake.imagesSetReturnsOnCall[len(fake.imagesSetArgsForCall)] + fake.imagesSetArgsForCall = append(fake.imagesSetArgsForCall, struct { + }{}) + stub := fake.ImagesSetStub + fakeReturns := fake.imagesSetReturns + fake.recordInvocation("ImagesSet", []interface{}{}) + fake.imagesSetMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Instance) ImagesSetCallCount() int { + fake.imagesSetMutex.RLock() + defer fake.imagesSetMutex.RUnlock() + return len(fake.imagesSetArgsForCall) +} + +func (fake *Instance) ImagesSetCalls(stub func() bool) { + fake.imagesSetMutex.Lock() + defer fake.imagesSetMutex.Unlock() + fake.ImagesSetStub = stub +} + +func (fake *Instance) ImagesSetReturns(result1 bool) { + fake.imagesSetMutex.Lock() + defer fake.imagesSetMutex.Unlock() + fake.ImagesSetStub = nil + fake.imagesSetReturns = struct { + result1 bool + }{result1} +} + +func (fake *Instance) ImagesSetReturnsOnCall(i int, result1 bool) { + fake.imagesSetMutex.Lock() + defer fake.imagesSetMutex.Unlock() + fake.ImagesSetStub = nil + if fake.imagesSetReturnsOnCall == nil { + 
fake.imagesSetReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.imagesSetReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Instance) SetFabricVersion(arg1 string) { + fake.setFabricVersionMutex.Lock() + fake.setFabricVersionArgsForCall = append(fake.setFabricVersionArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SetFabricVersionStub + fake.recordInvocation("SetFabricVersion", []interface{}{arg1}) + fake.setFabricVersionMutex.Unlock() + if stub != nil { + fake.SetFabricVersionStub(arg1) + } +} + +func (fake *Instance) SetFabricVersionCallCount() int { + fake.setFabricVersionMutex.RLock() + defer fake.setFabricVersionMutex.RUnlock() + return len(fake.setFabricVersionArgsForCall) +} + +func (fake *Instance) SetFabricVersionCalls(stub func(string)) { + fake.setFabricVersionMutex.Lock() + defer fake.setFabricVersionMutex.Unlock() + fake.SetFabricVersionStub = stub +} + +func (fake *Instance) SetFabricVersionArgsForCall(i int) string { + fake.setFabricVersionMutex.RLock() + defer fake.setFabricVersionMutex.RUnlock() + argsForCall := fake.setFabricVersionArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Instance) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.getArchMutex.RLock() + defer fake.getArchMutex.RUnlock() + fake.getFabricVersionMutex.RLock() + defer fake.getFabricVersionMutex.RUnlock() + fake.getRegistryURLMutex.RLock() + defer fake.getRegistryURLMutex.RUnlock() + fake.imagesSetMutex.RLock() + defer fake.imagesSetMutex.RUnlock() + fake.setFabricVersionMutex.RLock() + defer fake.setFabricVersionMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *Instance) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ reconcilechecks.Instance = new(Instance) diff --git a/pkg/offering/common/reconcilechecks/mocks/update.go b/pkg/offering/common/reconcilechecks/mocks/update.go new file mode 100644 index 00000000..d5fd4b11 --- /dev/null +++ b/pkg/offering/common/reconcilechecks/mocks/update.go @@ -0,0 +1,167 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package mocks + +import ( + "sync" + + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common/reconcilechecks" +) + +type Update struct { + FabricVersionUpdatedStub func() bool + fabricVersionUpdatedMutex sync.RWMutex + fabricVersionUpdatedArgsForCall []struct { + } + fabricVersionUpdatedReturns struct { + result1 bool + } + fabricVersionUpdatedReturnsOnCall map[int]struct { + result1 bool + } + ImagesUpdatedStub func() bool + imagesUpdatedMutex sync.RWMutex + imagesUpdatedArgsForCall []struct { + } + imagesUpdatedReturns struct { + result1 bool + } + imagesUpdatedReturnsOnCall map[int]struct { + result1 bool + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *Update) FabricVersionUpdated() bool { + fake.fabricVersionUpdatedMutex.Lock() + ret, specificReturn := fake.fabricVersionUpdatedReturnsOnCall[len(fake.fabricVersionUpdatedArgsForCall)] + fake.fabricVersionUpdatedArgsForCall = append(fake.fabricVersionUpdatedArgsForCall, struct { + }{}) + stub := fake.FabricVersionUpdatedStub + fakeReturns := fake.fabricVersionUpdatedReturns + fake.recordInvocation("FabricVersionUpdated", []interface{}{}) + fake.fabricVersionUpdatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) FabricVersionUpdatedCallCount() int { + fake.fabricVersionUpdatedMutex.RLock() + defer fake.fabricVersionUpdatedMutex.RUnlock() + return len(fake.fabricVersionUpdatedArgsForCall) +} + +func (fake *Update) FabricVersionUpdatedCalls(stub func() bool) { + fake.fabricVersionUpdatedMutex.Lock() + defer fake.fabricVersionUpdatedMutex.Unlock() + fake.FabricVersionUpdatedStub = stub +} + +func (fake *Update) FabricVersionUpdatedReturns(result1 bool) { + fake.fabricVersionUpdatedMutex.Lock() + defer fake.fabricVersionUpdatedMutex.Unlock() + fake.FabricVersionUpdatedStub = nil + fake.fabricVersionUpdatedReturns = struct { + result1 bool + }{result1} +} + +func (fake *Update) FabricVersionUpdatedReturnsOnCall(i int, result1 bool) { + fake.fabricVersionUpdatedMutex.Lock() + defer fake.fabricVersionUpdatedMutex.Unlock() + fake.FabricVersionUpdatedStub = nil + if fake.fabricVersionUpdatedReturnsOnCall == nil { + fake.fabricVersionUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.fabricVersionUpdatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) ImagesUpdated() bool { + fake.imagesUpdatedMutex.Lock() + ret, specificReturn := fake.imagesUpdatedReturnsOnCall[len(fake.imagesUpdatedArgsForCall)] + fake.imagesUpdatedArgsForCall = append(fake.imagesUpdatedArgsForCall, struct { + }{}) + stub := fake.ImagesUpdatedStub + fakeReturns := fake.imagesUpdatedReturns + fake.recordInvocation("ImagesUpdated", []interface{}{}) + fake.imagesUpdatedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) ImagesUpdatedCallCount() int { + fake.imagesUpdatedMutex.RLock() + defer fake.imagesUpdatedMutex.RUnlock() + return len(fake.imagesUpdatedArgsForCall) +} + +func (fake *Update) ImagesUpdatedCalls(stub func() bool) { + fake.imagesUpdatedMutex.Lock() + defer fake.imagesUpdatedMutex.Unlock() + fake.ImagesUpdatedStub = stub +} + +func (fake *Update) ImagesUpdatedReturns(result1 bool) { + fake.imagesUpdatedMutex.Lock() + defer fake.imagesUpdatedMutex.Unlock() + fake.ImagesUpdatedStub = nil + fake.imagesUpdatedReturns = struct { + result1 bool + 
}{result1} +} + +func (fake *Update) ImagesUpdatedReturnsOnCall(i int, result1 bool) { + fake.imagesUpdatedMutex.Lock() + defer fake.imagesUpdatedMutex.Unlock() + fake.ImagesUpdatedStub = nil + if fake.imagesUpdatedReturnsOnCall == nil { + fake.imagesUpdatedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.imagesUpdatedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *Update) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.fabricVersionUpdatedMutex.RLock() + defer fake.fabricVersionUpdatedMutex.RUnlock() + fake.imagesUpdatedMutex.RLock() + defer fake.imagesUpdatedMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *Update) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ reconcilechecks.Update = new(Update) diff --git a/pkg/offering/common/reconcilechecks/mocks/version.go b/pkg/offering/common/reconcilechecks/mocks/version.go new file mode 100644 index 00000000..9ee3e384 --- /dev/null +++ b/pkg/offering/common/reconcilechecks/mocks/version.go @@ -0,0 +1,186 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package mocks + +import ( + "sync" + + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common/reconcilechecks" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common/reconcilechecks/images" +) + +type Version struct { + NormalizeStub func(images.FabricVersionInstance) string + normalizeMutex sync.RWMutex + normalizeArgsForCall []struct { + arg1 images.FabricVersionInstance + } + normalizeReturns struct { + result1 string + } + normalizeReturnsOnCall map[int]struct { + result1 string + } + ValidateStub func(images.FabricVersionInstance) error + validateMutex sync.RWMutex + validateArgsForCall []struct { + arg1 images.FabricVersionInstance + } + validateReturns struct { + result1 error + } + validateReturnsOnCall map[int]struct { + result1 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *Version) Normalize(arg1 images.FabricVersionInstance) string { + fake.normalizeMutex.Lock() + ret, specificReturn := fake.normalizeReturnsOnCall[len(fake.normalizeArgsForCall)] + fake.normalizeArgsForCall = append(fake.normalizeArgsForCall, struct { + arg1 images.FabricVersionInstance + }{arg1}) + stub := fake.NormalizeStub + fakeReturns := fake.normalizeReturns + fake.recordInvocation("Normalize", []interface{}{arg1}) + fake.normalizeMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Version) NormalizeCallCount() int { + fake.normalizeMutex.RLock() + defer fake.normalizeMutex.RUnlock() + return len(fake.normalizeArgsForCall) +} + +func (fake *Version) NormalizeCalls(stub func(images.FabricVersionInstance) string) { + fake.normalizeMutex.Lock() + defer fake.normalizeMutex.Unlock() + fake.NormalizeStub = stub +} + +func (fake *Version) NormalizeArgsForCall(i int) images.FabricVersionInstance { + fake.normalizeMutex.RLock() + defer fake.normalizeMutex.RUnlock() + 
argsForCall := fake.normalizeArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Version) NormalizeReturns(result1 string) { + fake.normalizeMutex.Lock() + defer fake.normalizeMutex.Unlock() + fake.NormalizeStub = nil + fake.normalizeReturns = struct { + result1 string + }{result1} +} + +func (fake *Version) NormalizeReturnsOnCall(i int, result1 string) { + fake.normalizeMutex.Lock() + defer fake.normalizeMutex.Unlock() + fake.NormalizeStub = nil + if fake.normalizeReturnsOnCall == nil { + fake.normalizeReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.normalizeReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *Version) Validate(arg1 images.FabricVersionInstance) error { + fake.validateMutex.Lock() + ret, specificReturn := fake.validateReturnsOnCall[len(fake.validateArgsForCall)] + fake.validateArgsForCall = append(fake.validateArgsForCall, struct { + arg1 images.FabricVersionInstance + }{arg1}) + stub := fake.ValidateStub + fakeReturns := fake.validateReturns + fake.recordInvocation("Validate", []interface{}{arg1}) + fake.validateMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Version) ValidateCallCount() int { + fake.validateMutex.RLock() + defer fake.validateMutex.RUnlock() + return len(fake.validateArgsForCall) +} + +func (fake *Version) ValidateCalls(stub func(images.FabricVersionInstance) error) { + fake.validateMutex.Lock() + defer fake.validateMutex.Unlock() + fake.ValidateStub = stub +} + +func (fake *Version) ValidateArgsForCall(i int) images.FabricVersionInstance { + fake.validateMutex.RLock() + defer fake.validateMutex.RUnlock() + argsForCall := fake.validateArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *Version) ValidateReturns(result1 error) { + fake.validateMutex.Lock() + defer fake.validateMutex.Unlock() + fake.ValidateStub = nil + fake.validateReturns = struct { + result1 error + }{result1} +} + +func (fake *Version) ValidateReturnsOnCall(i int, result1 error) { + fake.validateMutex.Lock() + defer fake.validateMutex.Unlock() + fake.ValidateStub = nil + if fake.validateReturnsOnCall == nil { + fake.validateReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.validateReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *Version) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.normalizeMutex.RLock() + defer fake.normalizeMutex.RUnlock() + fake.validateMutex.RLock() + defer fake.validateMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *Version) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ reconcilechecks.Version = new(Version) diff --git a/pkg/offering/common/reconcilechecks/reconcilechecks_suite_test.go b/pkg/offering/common/reconcilechecks/reconcilechecks_suite_test.go new file mode 100644 index 00000000..0ca0c859 --- /dev/null +++ b/pkg/offering/common/reconcilechecks/reconcilechecks_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright 
contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package reconcilechecks_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestReconcilechecks(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Reconcilechecks Suite") +} diff --git a/pkg/offering/common/result.go b/pkg/offering/common/result.go new file mode 100644 index 00000000..5307edad --- /dev/null +++ b/pkg/offering/common/result.go @@ -0,0 +1,30 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package common + +import ( + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +type Result struct { + reconcile.Result + Status *current.CRStatus + OverrideUpdateStatus bool +} diff --git a/pkg/offering/common/secret.go b/pkg/offering/common/secret.go new file mode 100644 index 00000000..3aea14e4 --- /dev/null +++ b/pkg/offering/common/secret.go @@ -0,0 +1,374 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package common + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common" + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +func GetTLSSignCertEncoded(client k8sclient.Client, instance v1.Object) (string, error) { + return getSignCertEncoded("tls", client, instance) +} + +func GetTLSKeystoreEncoded(client k8sclient.Client, instance v1.Object) (string, error) { + return getKeystoreEncoded("tls", client, instance) +} + +func GetTLSCACertEncoded(client k8sclient.Client, instance v1.Object) ([]string, error) { + return getCACertEncoded("tls", client, instance) +} + +func GetEcertSignCertEncoded(client k8sclient.Client, instance v1.Object) (string, error) { + return getSignCertEncoded("ecert", client, instance) +} + +func GetEcertKeystoreEncoded(client k8sclient.Client, instance v1.Object) (string, error) { + return getKeystoreEncoded("ecert", client, instance) +} + +func GetEcertCACertEncoded(client k8sclient.Client, instance v1.Object) ([]string, error) { + return getCACertEncoded("ecert", client, instance) +} + +func GetEcertAdmincertEncoded(client k8sclient.Client, instance v1.Object) ([]string, error) { + return getAdmincertEncoded("ecert", client, instance) +} + +func GetEcertIntercertEncoded(client k8sclient.Client, instance v1.Object) ([]string, error) { + return getIntermediateCertEncoded("ecert", client, instance) +} + +func GetTLSIntercertEncoded(client k8sclient.Client, instance v1.Object) ([]string, error) { + return getIntermediateCertEncoded("tls", client, instance) +} + +func getSignCertBytes(prefix common.SecretType, client k8sclient.Client, instance v1.Object) ([]byte, error) { + secretName := fmt.Sprintf("%s-%s-signcert", prefix, instance.GetName()) + namespacedName := types.NamespacedName{ + Name: secretName, + Namespace: instance.GetNamespace(), + } + + secret := &corev1.Secret{} + err := client.Get(context.TODO(), namespacedName, secret) + if err != nil { + return nil, err + } + + if secret.Data == nil || len(secret.Data) == 0 { + return nil, fmt.Errorf("%s signcert secret is blank", prefix) + } + + if secret.Data["cert.pem"] != nil { + return secret.Data["cert.pem"], nil + } + + return nil, fmt.Errorf("cannot get %s signcert", prefix) +} + +func getKeystoreBytes(prefix common.SecretType, client k8sclient.Client, instance v1.Object) ([]byte, error) { + secretName := fmt.Sprintf("%s-%s-keystore", prefix, instance.GetName()) + namespacedName := types.NamespacedName{ + Name: secretName, + Namespace: instance.GetNamespace(), + } + + secret := &corev1.Secret{} + err := client.Get(context.TODO(), namespacedName, secret) + if err != nil { + return nil, err + } + + if secret.Data == nil || len(secret.Data) == 0 { + return nil, fmt.Errorf("%s keystore secret is blank", prefix) + } + + if secret.Data["key.pem"] != nil { + return secret.Data["key.pem"], nil + } + + return nil, fmt.Errorf("cannot get %s keystore", prefix) +} + +func getCACertBytes(prefix common.SecretType, client k8sclient.Client, instance v1.Object) ([][]byte, error) { + secretName := fmt.Sprintf("%s-%s-cacerts", prefix, instance.GetName()) + namespacedName := types.NamespacedName{ + Name: secretName, + Namespace: instance.GetNamespace(), + } + + secret := &corev1.Secret{} + err := client.Get(context.TODO(), namespacedName, secret) + if err != 
nil {
+ return nil, err
+ }
+
+ if secret.Data == nil || len(secret.Data) == 0 {
+ return nil, fmt.Errorf("%s cacert secret is blank", prefix)
+ }
+
+ var certs [][]byte
+ for _, cert := range secret.Data {
+ if cert != nil {
+ certs = append(certs, cert)
+ }
+ }
+
+ return certs, nil
+}
+
+func getAdmincertBytes(prefix common.SecretType, client k8sclient.Client, instance v1.Object) ([][]byte, error) {
+ secretName := fmt.Sprintf("%s-%s-admincerts", prefix, instance.GetName())
+ namespacedName := types.NamespacedName{
+ Name: secretName,
+ Namespace: instance.GetNamespace(),
+ }
+
+ secret := &corev1.Secret{}
+ err := client.Get(context.TODO(), namespacedName, secret)
+ if err != nil {
+ // if admincert secret is not found, admincerts don't exist
+ if k8serrors.IsNotFound(err) {
+ return nil, nil
+ }
+ return nil, err
+ }
+
+ if secret.Data == nil || len(secret.Data) == 0 {
+ // do not throw error
+ return nil, nil // errors.New("Ecert admincert secret is blank")
+ }
+
+ var certs [][]byte
+ for _, cert := range secret.Data {
+ if cert != nil {
+ certs = append(certs, cert)
+ }
+ }
+
+ return certs, nil
+}
+
+func getIntermediateCertBytes(prefix common.SecretType, client k8sclient.Client, instance v1.Object) ([][]byte, error) {
+ secretName := fmt.Sprintf("%s-%s-intercerts", prefix, instance.GetName())
+ namespacedName := types.NamespacedName{
+ Name: secretName,
+ Namespace: instance.GetNamespace(),
+ }
+
+ secret := &corev1.Secret{}
+ err := client.Get(context.TODO(), namespacedName, secret)
+ if err != nil {
+ // if intercert secret is not found, intercerts don't exist
+ if k8serrors.IsNotFound(err) {
+ return nil, nil
+ }
+ return nil, err
+ }
+
+ if secret.Data == nil || len(secret.Data) == 0 {
+ // do not throw error
+ return nil, nil
+ }
+
+ var certs [][]byte
+ for _, cert := range secret.Data {
+ if cert != nil {
+ certs = append(certs, cert)
+ }
+ }
+
+ return certs, nil
+}
+
+func getSignCertEncoded(prefix common.SecretType, client k8sclient.Client, instance v1.Object) (string, error) {
+ certBytes, err := getSignCertBytes(prefix, client, instance)
+ if err != nil {
+ return "", err
+ }
+
+ cert := base64.StdEncoding.EncodeToString(certBytes)
+ return cert, nil
+}
+
+func getKeystoreEncoded(prefix common.SecretType, client k8sclient.Client, instance v1.Object) (string, error) {
+ keyBytes, err := getKeystoreBytes(prefix, client, instance)
+ if err != nil {
+ return "", err
+ }
+
+ cert := base64.StdEncoding.EncodeToString(keyBytes)
+ return cert, nil
+}
+
+func getCACertEncoded(prefix common.SecretType, client k8sclient.Client, instance v1.Object) ([]string, error) {
+ certBytes, err := getCACertBytes(prefix, client, instance)
+ if err != nil {
+ return nil, err
+ }
+
+ var certs []string
+ for _, certByte := range certBytes {
+ cert := base64.StdEncoding.EncodeToString(certByte)
+ certs = append(certs, cert)
+ }
+ return certs, nil
+}
+
+func getAdmincertEncoded(prefix common.SecretType, client k8sclient.Client, instance v1.Object) ([]string, error) {
+ certBytes, err := getAdmincertBytes(prefix, client, instance)
+ if err != nil {
+ return nil, err
+ }
+
+ var certs []string
+ for _, certByte := range certBytes {
+ cert := base64.StdEncoding.EncodeToString(certByte)
+ certs = append(certs, cert)
+ }
+ return certs, nil
+}
+
+func getIntermediateCertEncoded(prefix common.SecretType, client k8sclient.Client, instance v1.Object) ([]string, error) {
+ certBytes, err := getIntermediateCertBytes(prefix, client, instance)
+ if err != nil {
+ return nil, err
+ }
+
+ var
certs []string + for _, certByte := range certBytes { + cert := base64.StdEncoding.EncodeToString(certByte) + certs = append(certs, cert) + } + return certs, nil +} + +type CACryptoBytes struct { + Cert []byte + Key []byte + OperationsCert []byte + OperationsKey []byte + TLSCert []byte + TLSKey []byte +} + +func GetCACryptoBytes(client k8sclient.Client, instance v1.Object) (*CACryptoBytes, error) { + secretName := fmt.Sprintf("%s-ca-crypto", instance.GetName()) + namespacedName := types.NamespacedName{ + Name: secretName, + Namespace: instance.GetNamespace(), + } + + secret := &corev1.Secret{} + err := client.Get(context.TODO(), namespacedName, secret) + if err != nil { + return nil, err + } + + if secret.Data == nil || len(secret.Data) == 0 { + return nil, errors.New("CA crypto secret is blank") + } + + if secret.Data["tls-cert.pem"] == nil { + return nil, errors.New("cannot get tlscert") + } + + return &CACryptoBytes{ + TLSCert: secret.Data["tls-cert.pem"], + TLSKey: secret.Data["tls-key.pem"], + Cert: secret.Data["cert.pem"], + Key: secret.Data["key.pem"], + OperationsCert: secret.Data["operations-cert.pem"], + OperationsKey: secret.Data["operations-key.pem"], + }, nil +} + +func GetTLSCACryptoBytes(client k8sclient.Client, instance v1.Object) (*CACryptoBytes, error) { + secretName := fmt.Sprintf("%s-tlsca-crypto", instance.GetName()) + namespacedName := types.NamespacedName{ + Name: secretName, + Namespace: instance.GetNamespace(), + } + + secret := &corev1.Secret{} + err := client.Get(context.TODO(), namespacedName, secret) + if err != nil { + return nil, err + } + + if secret.Data == nil || len(secret.Data) == 0 { + return nil, errors.New("TLSCA crypto secret is blank") + } + if secret.Data["cert.pem"] == nil { + return nil, errors.New("cannot get root TLSCA cert") + } + return &CACryptoBytes{ + Cert: secret.Data["cert.pem"], + Key: secret.Data["key.pem"], + }, nil +} + +type CACryptoEncoded struct { + Cert string + Key string + OperationsCert string + OperationsKey string + TLSCert string + TLSKey string +} + +func GetCACryptoEncoded(client k8sclient.Client, instance v1.Object) (*CACryptoEncoded, error) { + bytes, err := GetCACryptoBytes(client, instance) + if err != nil { + return nil, err + } + + encoded := &CACryptoEncoded{} + encoded.Cert = base64.StdEncoding.EncodeToString(bytes.Cert) + encoded.Key = base64.StdEncoding.EncodeToString(bytes.Key) + encoded.OperationsCert = base64.StdEncoding.EncodeToString(bytes.OperationsCert) + encoded.OperationsKey = base64.StdEncoding.EncodeToString(bytes.OperationsKey) + encoded.TLSCert = base64.StdEncoding.EncodeToString(bytes.TLSCert) + encoded.TLSKey = base64.StdEncoding.EncodeToString(bytes.TLSKey) + + return encoded, err +} + +func GetTLSCACryptoEncoded(client k8sclient.Client, instance v1.Object) (*CACryptoEncoded, error) { + bytes, err := GetTLSCACryptoBytes(client, instance) + if err != nil { + return nil, err + } + + encoded := &CACryptoEncoded{} + encoded.Cert = base64.StdEncoding.EncodeToString(bytes.Cert) + encoded.Key = base64.StdEncoding.EncodeToString(bytes.Key) + + return encoded, err +} diff --git a/pkg/offering/k8s/ca/ca.go b/pkg/offering/k8s/ca/ca.go new file mode 100644 index 00000000..3c295dba --- /dev/null +++ b/pkg/offering/k8s/ca/ca.go @@ -0,0 +1,227 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package k8sca + +import ( + "context" + "fmt" + "strings" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + resourcemanager "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/manager" + baseca "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/ca" + basecaoverride "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/ca/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common" + override "github.com/IBM-Blockchain/fabric-operator/pkg/offering/k8s/ca/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/operatorerrors" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + "github.com/IBM-Blockchain/fabric-operator/version" + "github.com/pkg/errors" + networkingv1 "k8s.io/api/networking/v1" + networkingv1beta1 "k8s.io/api/networking/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var log = logf.Log.WithName("k8s_ca") + +type Override interface { + baseca.Override + Ingress(v1.Object, *networkingv1.Ingress, resources.Action) error + Ingressv1beta1(v1.Object, *networkingv1beta1.Ingress, resources.Action) error +} + +var _ baseca.IBPCA = &CA{} + +type CA struct { + *baseca.CA + + IngressManager resources.Manager + Ingressv1beta1Manager resources.Manager + + Override Override +} + +func New(client k8sclient.Client, scheme *runtime.Scheme, config *config.Config) *CA { + o := &override.Override{ + Override: basecaoverride.Override{ + Client: client, + }, + } + ca := &CA{ + CA: baseca.New(client, scheme, config, o), + Override: o, + } + ca.CreateManagers() + return ca +} + +func (ca *CA) CreateManagers() { + resourceManager := resourcemanager.New(ca.Client, ca.Scheme) + ca.IngressManager = resourceManager.CreateIngressManager("", ca.Override.Ingress, ca.GetLabels, ca.Config.CAInitConfig.IngressFile) + ca.Ingressv1beta1Manager = resourceManager.CreateIngressv1beta1Manager("", ca.Override.Ingressv1beta1, ca.GetLabels, ca.Config.CAInitConfig.Ingressv1beta1File) +} + +func (ca *CA) Reconcile(instance *current.IBPCA, update baseca.Update) (common.Result, error) { + + var err error + + versionSet, err := ca.SetVersion(instance) + if err != nil { + return common.Result{}, errors.Wrap(err, fmt.Sprintf("failed updating CR '%s' to version '%s'", instance.Name, version.Operator)) + } + if versionSet { + log.Info("Instance version updated, requeuing request...") + return common.Result{ + Result: reconcile.Result{ + Requeue: true, + }, + }, nil + } + + instanceUpdated, err := ca.PreReconcileChecks(instance, update) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed pre reconcile checks") + } + + if instanceUpdated { + log.Info("Updating instance after pre reconcile checks") + err := 
ca.Client.Patch(context.TODO(), instance, nil, k8sclient.PatchOption{
+ Resilient: &k8sclient.ResilientPatch{
+ Retry: 3,
+ Into: &current.IBPCA{},
+ Strategy: client.MergeFrom,
+ },
+ })
+ if err != nil {
+ return common.Result{}, errors.Wrap(err, "failed to update instance")
+ }
+
+ log.Info("Instance updated, requeuing request...")
+ return common.Result{
+ Result: reconcile.Result{
+ Requeue: true,
+ },
+ }, nil
+ }
+
+ err = ca.AddTLSCryptoIfMissing(instance, ca.GetEndpointsDNS(instance))
+ if err != nil {
+ return common.Result{}, errors.Wrap(err, "failed to generate tls crypto")
+ }
+
+ err = ca.Initialize(instance, update)
+ if err != nil {
+ return common.Result{}, operatorerrors.Wrap(err, operatorerrors.CAInitilizationFailed, "failed to initialize ca")
+ }
+
+ err = ca.ReconcileManagers(instance, update)
+ if err != nil {
+ return common.Result{}, errors.Wrap(err, "failed to reconcile managers")
+ }
+
+ if update.CATagUpdated() {
+ if err := ca.ReconcileFabricCAMigration(instance); err != nil {
+ return common.Result{}, operatorerrors.Wrap(err, operatorerrors.FabricCAMigrationFailed, "failed to migrate fabric ca versions")
+ }
+ }
+
+ err = ca.UpdateConnectionProfile(instance)
+ if err != nil {
+ return common.Result{}, errors.Wrap(err, "failed to create connection profile")
+ }
+
+ err = ca.CheckStates(instance)
+ if err != nil {
+ return common.Result{}, errors.Wrap(err, "failed to check and restore state")
+ }
+
+ status, err := ca.CheckCertificates(instance)
+ if err != nil {
+ return common.Result{}, errors.Wrap(err, "failed to check for expiring certificates")
+ }
+
+ if update.CACryptoUpdated() {
+ err = ca.Restart.ForTLSReenroll(instance)
+ if err != nil {
+ return common.Result{}, errors.Wrap(err, "failed to update restart config")
+ }
+ }
+
+ err = ca.HandleActions(instance, update)
+ if err != nil {
+ return common.Result{}, err
+ }
+
+ err = ca.HandleRestart(instance, update)
+ if err != nil {
+ return common.Result{}, err
+ }
+
+ return common.Result{
+ Status: status,
+ }, nil
+}
+
+func (ca *CA) ReconcileManagers(instance *current.IBPCA, update baseca.Update) error {
+ err := ca.CA.ReconcileManagers(instance, update)
+ if err != nil {
+ return err
+ }
+
+ err = ca.ReconcileIngressManager(instance, update.SpecUpdated())
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (ca *CA) ReconcileIngressManager(instance *current.IBPCA, update bool) error {
+ if ca.Config.Operator.Globals.AllowKubernetesEighteen == "true" {
+ // check k8s version
+ version, err := util.GetServerVersion()
+ if err != nil {
+ return err
+ }
+ if strings.Compare(version.Minor, "19") < 0 { // v1beta
+ err = ca.Ingressv1beta1Manager.Reconcile(instance, update)
+ if err != nil {
+ return errors.Wrap(err, "failed Ingressv1beta1 reconciliation")
+ }
+ } else {
+ err = ca.IngressManager.Reconcile(instance, update)
+ if err != nil {
+ return errors.Wrap(err, "failed Ingress reconciliation")
+ }
+ }
+ } else {
+ err := ca.IngressManager.Reconcile(instance, update)
+ if err != nil {
+ return errors.Wrap(err, "failed Ingress reconciliation")
+ }
+ }
+ return nil
+}
diff --git a/pkg/offering/k8s/ca/ca_suite_test.go b/pkg/offering/k8s/ca/ca_suite_test.go
new file mode 100644
index 00000000..0c8b2e76
--- /dev/null
+++ b/pkg/offering/k8s/ca/ca_suite_test.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright contributors to the Hyperledger Fabric Operator project
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in
compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package k8sca_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestCa(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Ca Suite") +} diff --git a/pkg/offering/k8s/ca/ca_test.go b/pkg/offering/k8s/ca/ca_test.go new file mode 100644 index 00000000..273a7dcc --- /dev/null +++ b/pkg/offering/k8s/ca/ca_test.go @@ -0,0 +1,264 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package k8sca_test + +import ( + "context" + "encoding/json" + "errors" + "os" + "path/filepath" + + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/runtime" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + cav1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/ca/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/deployer" + initializer "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca" + managermocks "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/mocks" + baseca "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/ca" + basecamocks "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/ca/mocks" + k8sca "github.com/IBM-Blockchain/fabric-operator/pkg/offering/k8s/ca" + override "github.com/IBM-Blockchain/fabric-operator/pkg/offering/k8s/ca/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/operatorerrors" + "github.com/IBM-Blockchain/fabric-operator/version" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var _ = Describe("K8s CA", func() { + const ( + defaultConfigs = "../../../../defaultconfig/ca" + testdataDir = "../../../../testdata" + + keyBase64 = 
"LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb2dJQkFBS0NBUUVBdFJBUDlMemUyZEc1cm1rbmcvdVVtREFZU0VwUElqRFdUUDhqUjMxcUJ5Yjc3YWUrCnk3UTRvRnZod1lDVUhsUWVTWjFKeTdUUHpEcitoUk5hdDJYNGdGYUpGYmVFbC9DSHJ3Rk1mNzNzQStWV1pHdnkKdXhtbjB2bEdYMW5zSEo5aUdIUS9qR2FvV1FJYzlVbnpHWi8yWStlZkpxOWd3cDBNemFzWWZkdXordXVBNlp4VAp5TTdDOWFlWmxYL2ZMYmVkSXVXTzVzaXhPSlZQeUVpcWpkd0RiY1AxYy9mRCtSMm1DbmM3VGovSnVLK1poTGxPCnhGcVlFRmtROHBmSi9LY1pabVF1QURZVFh6RGp6OENxcTRTRU5ySzI0b2hQQkN2SGgyanplWjhGdGR4MmpSSFQKaXdCZWZEYWlSWVBSOUM4enk4K1Z2Wmt6S0hQV3N5aENiNUMrN1FJREFRQUJBb0lCQUZROGhzL2IxdW9Mc3BFOApCdEJXaVVsTWh0K0xBc25yWXFncnd5UU5hdmlzNEdRdXVJdFk2MGRmdCtZb2hjQ2ViZ0RkbG1tWlUxdTJ6cGJtCjdEdUt5MVFaN21rV0dpLytEWUlUM3AxSHBMZ2pTRkFzRUorUFRnN1BQamc2UTZrRlZjUCt3Vm4yb0xmWVRkU28KZE5zbEdxSmNMaVQzVHRMNzhlcjFnTTE5RzN6T3J1ZndrSGJSYU1BRmtvZ1ExUlZLSWpnVGUvbmpIMHFHNW9JagoxNEJLeFFKTUZFTG1pQk50NUx5OVMxWWdxTDRjbmNtUDN5L1QyNEdodVhNckx0eTVOeVhnS0dFZ1pUTDMzZzZvCnYreDFFMFRURWRjMVQvWVBGWkdBSXhHdWRKNWZZZ2JtWU9LZ09mUHZFOE9TbEV6OW56aHNnckVZYjdQVThpZDUKTHFycVJRRUNnWUVBNjIyT3RIUmMxaVY1ZXQxdHQydTVTTTlTS2h2b0lPT3d2Q3NnTEI5dDJzNEhRUlRYN0RXcAo0VDNpUC9leEl5OXI3bTIxNFo5MEgzZlpVNElSUkdHSUxKUVMrYzRQNVA4cHJFTDcyd1dIWlpQTTM3QlZTQ1U3CkxOTXl4TkRjeVdjSUJIVFh4NUY2eXhLNVFXWTg5MVB0eDlDamJFSEcrNVJVdDA4UVlMWDlUQTBDZ1lFQXhPSmYKcXFjeThMOVZyYUFVZG9lbGdIU0NGSkJRR3hMRFNSQlJSTkRIOUJhaWlZOCtwZzd2TExTRXFMRFpsbkZPbFkrQQpiRENEQ0RtdHhwRXViY0x6b3FnOXhlQTZ0eXZZWkNWalY5dXVzNVh1Wmk1VDBBUHhCdm56OHNNa3dRY3RQWkRQCk8zQTN4WllkZzJBRmFrV1BmT1FFbjVaK3F4TU13SG9VZ1ZwQkptRUNnWUJ2Q2FjcTJVOEgrWGpJU0ROOU5TT1kKZ1ovaEdIUnRQcmFXcVVodFJ3MkxDMjFFZHM0NExEOUphdVNSQXdQYThuelhZWXROTk9XU0NmYkllaW9tdEZHRApwUHNtTXRnd1MyQ2VUS0Y0OWF5Y2JnOU0yVi8vdlAraDdxS2RUVjAwNkpGUmVNSms3K3FZYU9aVFFDTTFDN0swCmNXVUNwQ3R6Y014Y0FNQmF2THNRNlFLQmdHbXJMYmxEdjUxaXM3TmFKV0Z3Y0MwL1dzbDZvdVBFOERiNG9RV1UKSUowcXdOV2ZvZm95TGNBS3F1QjIrbkU2SXZrMmFiQ25ZTXc3V0w4b0VJa3NodUtYOVgrTVZ6Y1VPekdVdDNyaQpGeU9mcHJJRXowcm5zcWNSNUJJNUZqTGJqVFpyMEMyUWp2NW5FVFAvaHlpQWFRQ1l5THAyWlVtZ0Vjb0VPNWtwClBhcEJBb0dBZVV0WjE0SVp2cVorQnAxR1VqSG9PR0pQVnlJdzhSRUFETjRhZXRJTUlQRWFVaDdjZUtWdVN6VXMKci9WczA1Zjg0cFBVaStuUTUzaGo2ZFhhYTd1UE1aMFBnNFY4cS9UdzJMZ3BWWndVd0ltZUQrcXNsbldha3VWMQpMSnp3SkhOa3pOWE1OMmJWREFZTndSamNRSmhtbzF0V2xHYlpRQjNoSkEwR2thWGZPa2c9Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==" + certBase64 = 
"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURBekNDQWV1Z0F3SUJBZ0lKQU9xQ1VmaFNjcWtlTUEwR0NTcUdTSWIzRFFFQkJRVUFNQmd4RmpBVUJnTlYKQkFNTURYQnZjM1JuY21WekxuUmxjM1F3SGhjTk1Ua3dOekl6TVRrd09UVTRXaGNOTWprd056SXdNVGt3T1RVNApXakFZTVJZd0ZBWURWUVFEREExd2IzTjBaM0psY3k1MFpYTjBNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DCkFROEFNSUlCQ2dLQ0FRRUF0UkFQOUx6ZTJkRzVybWtuZy91VW1EQVlTRXBQSWpEV1RQOGpSMzFxQnliNzdhZSsKeTdRNG9Gdmh3WUNVSGxRZVNaMUp5N1RQekRyK2hSTmF0Mlg0Z0ZhSkZiZUVsL0NIcndGTWY3M3NBK1ZXWkd2eQp1eG1uMHZsR1gxbnNISjlpR0hRL2pHYW9XUUljOVVuekdaLzJZK2VmSnE5Z3dwME16YXNZZmR1eit1dUE2WnhUCnlNN0M5YWVabFgvZkxiZWRJdVdPNXNpeE9KVlB5RWlxamR3RGJjUDFjL2ZEK1IybUNuYzdUai9KdUsrWmhMbE8KeEZxWUVGa1E4cGZKL0tjWlptUXVBRFlUWHpEano4Q3FxNFNFTnJLMjRvaFBCQ3ZIaDJqemVaOEZ0ZHgyalJIVAppd0JlZkRhaVJZUFI5Qzh6eTgrVnZaa3pLSFBXc3loQ2I1Qys3UUlEQVFBQm8xQXdUakFkQmdOVkhRNEVGZ1FVCi9mZ01BcExIMXBvcFFoS25KTmgrVk04QUtQZ3dId1lEVlIwakJCZ3dGb0FVL2ZnTUFwTEgxcG9wUWhLbkpOaCsKVk04QUtQZ3dEQVlEVlIwVEJBVXdBd0VCL3pBTkJna3Foa2lHOXcwQkFRVUZBQU9DQVFFQURjOUc4M05LaWw3ZQpoVFlvR1piejhFV1o4c0puVnY4azMwRDlydUY1OXFvT0ppZGorQUhNbzNHOWtud1lvbGFGbmJwb093cElOZ3g1CnYvL21aU3VldlFMZUZKRlN1UjBheVQ1WFYxcjljNUZGQ2JSaEp0cE4rOEdTT29tRUFSYTNBVGVFSG5WeVpaYkMKWkFQQUxMVXlVeUVrSDR3Q0RZUGtYa3dWQVVlR2FGVmNqZWR0eGJ3Z2k0dG0rSFZoTEt5Y0NoZ25YUVhxQ2srTwo2RHJIc0Z0STVTNWQvQlBPbE1Yc28vNUFielBGelpVVVg4OEhkVUhWSWlqM0luMXdUbWhtREtwdzZ6dmcvNjIxCjRhcGhDOWJ2bXAxeUVOUklzb0xiMGlMWVAzRSswU0ZkZC9IRnRhVXV3eUx6cnl4R2xrdG1BVUJWNVdYZEQxMkIKTU1mQnhvNFVYUT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K" + ) + + AfterEach(func() { + err := os.RemoveAll("shared") + Expect(err).NotTo(HaveOccurred()) + }) + + var ( + ca *k8sca.CA + instance *current.IBPCA + mockKubeClient *mocks.Client + + deploymentMgr *managermocks.ResourceManager + serviceMgr *managermocks.ResourceManager + pvcMgr *managermocks.ResourceManager + roleMgr *managermocks.ResourceManager + roleBindingMgr *managermocks.ResourceManager + serviceAccountMgr *managermocks.ResourceManager + ingressMgr *managermocks.ResourceManager + + initMock *basecamocks.InitializeIBPCA + update *basecamocks.Update + certMgr *basecamocks.CertificateManager + ) + + Context("Reconciles", func() { + BeforeEach(func() { + mockKubeClient = &mocks.Client{} + update = &basecamocks.Update{} + + replicas := int32(1) + instance = ¤t.IBPCA{ + TypeMeta: metav1.TypeMeta{ + Kind: "IBPCA", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "ca1", + Namespace: "test", + }, + Spec: current.IBPCASpec{ + Domain: "domain", + Replicas: &replicas, + Images: ¤t.CAImages{}, + FabricVersion: "1.4.9-0", + }, + Status: current.IBPCAStatus{ + CRStatus: current.CRStatus{ + Version: version.Operator, + }, + }, + } + + mockKubeClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj client.Object) error { + switch obj.(type) { + case *corev1.Secret: + o := obj.(*corev1.Secret) + switch types.Name { + case instance.Name + "-ca-crypto": + o.Name = instance.Name + "-ca-crypto" + o.Namespace = instance.Namespace + o.Data = map[string][]byte{"tls-cert.pem": []byte(certBase64)} + case instance.Name + "-tlsca-crypto": + o.Name = instance.Name + "-tlsca-crypto" + o.Namespace = instance.Namespace + o.Data = map[string][]byte{"cert.pem": []byte(certBase64)} + } + } + return nil + } + + deploymentMgr = &managermocks.ResourceManager{} + serviceMgr = &managermocks.ResourceManager{} + pvcMgr = &managermocks.ResourceManager{} + roleMgr = &managermocks.ResourceManager{} + roleBindingMgr = &managermocks.ResourceManager{} + serviceAccountMgr = &managermocks.ResourceManager{} + ingressMgr = &managermocks.ResourceManager{} + initMock = 
&basecamocks.InitializeIBPCA{} + restartMgr := &basecamocks.RestartManager{} + certMgr = &basecamocks.CertificateManager{} + + config := &config.Config{ + CAInitConfig: &initializer.Config{ + CADefaultConfigPath: filepath.Join(defaultConfigs, "/ca.yaml"), + CAOverrideConfigPath: filepath.Join(testdataDir, "init/override.yaml"), + TLSCADefaultConfigPath: filepath.Join(defaultConfigs, "tlsca.yaml"), + TLSCAOverrideConfigPath: filepath.Join(testdataDir, "init/override.yaml"), + SharedPath: "shared", + }, + Operator: config.Operator{ + Versions: &deployer.Versions{ + CA: map[string]deployer.VersionCA{ + "1.4.9-0": {}, + }, + }, + }, + } + + certMgr.GetSecretReturns(&corev1.Secret{}, nil) + deploymentMgr.ExistsReturns(true) + ca = &k8sca.CA{ + CA: &baseca.CA{ + DeploymentManager: deploymentMgr, + ServiceManager: serviceMgr, + PVCManager: pvcMgr, + RoleManager: roleMgr, + RoleBindingManager: roleBindingMgr, + ServiceAccountManager: serviceAccountMgr, + Client: mockKubeClient, + Scheme: &runtime.Scheme{}, + Override: &override.Override{}, + Config: config, + Initializer: initMock, + Restart: restartMgr, + CertificateManager: certMgr, + }, + IngressManager: ingressMgr, + Override: &override.Override{}, + } + }) + + It("returns a breaking error if initialization fails", func() { + initMock.HandleEnrollmentCAInitReturns(nil, errors.New("failed to init")) + _, err := ca.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("Code: 20 - failed to initialize ca: failed to init")) + Expect(operatorerrors.IsBreakingError(err, "msg", nil)).NotTo(HaveOccurred()) + }) + + It("returns an error if pvc manager fails to reconcile", func() { + pvcMgr.ReconcileReturns(errors.New("failed to reconcile pvc")) + _, err := ca.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed PVC reconciliation: failed to reconcile pvc")) + }) + + It("returns an error if service manager fails to reconcile", func() { + serviceMgr.ReconcileReturns(errors.New("failed to reconcile service")) + _, err := ca.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed Service reconciliation: failed to reconcile service")) + }) + + It("returns an error if role manager fails to reconcile", func() { + roleMgr.ReconcileReturns(errors.New("failed to reconcile role")) + _, err := ca.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to reconcile role")) + }) + + It("returns an error if role binding manager fails to reconcile", func() { + roleBindingMgr.ReconcileReturns(errors.New("failed to reconcile role binding")) + _, err := ca.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to reconcile role binding")) + }) + + It("returns an error if service account manager fails to reconcile", func() { + serviceAccountMgr.ReconcileReturns(errors.New("failed to reconcile service account")) + _, err := ca.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to reconcile service account")) + }) + + It("returns an error if deployment manager fails to reconcile", func() { + deploymentMgr.ReconcileReturns(errors.New("failed to reconcile deployment")) + _, err := ca.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed 
Deployment reconciliation: failed to reconcile deployment")) + }) + + It("returns an error if ingress manager fails to reconcile", func() { + ingressMgr.ReconcileReturns(errors.New("failed to reconcile ingress")) + _, err := ca.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed Ingress reconciliation: failed to reconcile ingress")) + }) + + It("returns an error if restart fails", func() { + update.RestartNeededReturns(true) + mockKubeClient.PatchReturns(errors.New("patch failed")) + _, err := ca.Reconcile(instance, update) + Expect(err).Should(MatchError(ContainSubstring("patch failed"))) + }) + + It("reconciles IBPCA", func() { + _, err := ca.Reconcile(instance, update) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("AddTLSCryptoIfMissing", func() { + It("adds tls crypto", func() { + mockKubeClient.GetReturns(errors.New("fake error")) + err := ca.AddTLSCryptoIfMissing(instance, &current.CAEndpoints{}) + Expect(err).NotTo(HaveOccurred()) + + caOverrides := &cav1.ServerConfig{} + err = json.Unmarshal(instance.Spec.ConfigOverride.CA.Raw, caOverrides) + Expect(err).NotTo(HaveOccurred()) + + Expect(caOverrides.TLS.CertFile).NotTo(Equal("")) + Expect(caOverrides.TLS.KeyFile).NotTo(Equal("")) + }) + }) +}) diff --git a/pkg/offering/k8s/ca/override/ingress.go b/pkg/offering/k8s/ca/override/ingress.go new file mode 100644 index 00000000..067b47c0 --- /dev/null +++ b/pkg/offering/k8s/ca/override/ingress.go @@ -0,0 +1,117 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override + +import ( + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + networkingv1 "k8s.io/api/networking/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (o *Override) Ingress(object v1.Object, ingress *networkingv1.Ingress, action resources.Action) error { + instance := object.(*current.IBPCA) + + switch action { + case resources.Create: + return o.CreateIngress(instance, ingress) + case resources.Update: + return o.UpdateIngress(instance, ingress) + } + + return nil +} + +func (o *Override) CreateIngress(instance *current.IBPCA, ingress *networkingv1.Ingress) error { + return o.CommonIngress(instance, ingress) +} + +func (o *Override) UpdateIngress(instance *current.IBPCA, ingress *networkingv1.Ingress) error { + return o.CommonIngress(instance, ingress) +} + +func (o *Override) CommonIngress(instance *current.IBPCA, ingress *networkingv1.Ingress) error { + + ingressClass := "nginx" + if instance.Spec.Ingress.Class != "" { + ingressClass = instance.Spec.Ingress.Class + } + ingress.ObjectMeta.Annotations["kubernetes.io/ingress.class"] = ingressClass + + apihost := instance.Namespace + "-" + instance.Name + "-ca" + "." 
+ instance.Spec.Domain + operationshost := instance.Namespace + "-" + instance.Name + "-operations" + "." + instance.Spec.Domain + + pathType := networkingv1.PathTypeImplementationSpecific + ingress.Spec = networkingv1.IngressSpec{ + Rules: []networkingv1.IngressRule{ + networkingv1.IngressRule{ + Host: apihost, + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ + networkingv1.HTTPIngressPath{ + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: instance.GetName(), + Port: networkingv1.ServiceBackendPort{ + Name: "http", + }, + }, + }, + Path: "/", + PathType: &pathType, + }, + }, + }, + }, + }, + networkingv1.IngressRule{ + Host: operationshost, + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ + networkingv1.HTTPIngressPath{ + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: instance.GetName(), + Port: networkingv1.ServiceBackendPort{ + Name: "operations", + }, + }, + }, + Path: "/", + PathType: &pathType, + }, + }, + }, + }, + }, + }, + TLS: []networkingv1.IngressTLS{ + networkingv1.IngressTLS{ + Hosts: []string{apihost}, + }, + networkingv1.IngressTLS{ + Hosts: []string{operationshost}, + }, + }, + } + + return nil +} diff --git a/pkg/offering/k8s/ca/override/ingress_test.go b/pkg/offering/k8s/ca/override/ingress_test.go new file mode 100644 index 00000000..f448e922 --- /dev/null +++ b/pkg/offering/k8s/ca/override/ingress_test.go @@ -0,0 +1,115 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + networkingv1 "k8s.io/api/networking/v1" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/k8s/ca/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" +) + +var _ = Describe("K8s CA Ingress Overrides", func() { + var ( + err error + overrider *override.Override + instance *current.IBPCA + ingress *networkingv1.Ingress + cahost string + operhost string + ) + + BeforeEach(func() { + overrider = &override.Override{} + instance = ¤t.IBPCA{ + Spec: current.IBPCASpec{ + Domain: "test.domain", + }, + } + ingress, err = util.GetIngressFromFile("../../../../../definitions/ca/ingress.yaml") + Expect(err).NotTo(HaveOccurred()) + + cahost = instance.Namespace + "-" + instance.Name + "-ca" + "." + instance.Spec.Domain + operhost = instance.Namespace + "-" + instance.Name + "-operations" + "." 
+ instance.Spec.Domain + }) + + Context("create", func() { + It("appropriately overrides the respective values for ingress", func() { + err := overrider.Ingress(instance, ingress, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + By("setting rule", func() { + pathType := networkingv1.PathTypeImplementationSpecific + Expect(ingress.Spec.Rules).To(HaveLen(2)) + Expect(ingress.Spec.Rules[0]).To(Equal(networkingv1.IngressRule{ + Host: cahost, + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ + networkingv1.HTTPIngressPath{ + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: instance.GetName(), + Port: networkingv1.ServiceBackendPort{ + Name: "http", + }, + }, + }, + Path: "/", + PathType: &pathType, + }, + }, + }, + }, + })) + Expect(ingress.Spec.Rules[1]).To(Equal(networkingv1.IngressRule{ + Host: operhost, + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ + networkingv1.HTTPIngressPath{ + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: instance.GetName(), + Port: networkingv1.ServiceBackendPort{ + Name: "operations", + }, + }, + }, + Path: "/", + PathType: &pathType, + }, + }, + }, + }, + })) + }) + + By("setting TLS hosts", func() { + Expect(ingress.Spec.TLS).To(HaveLen(2)) + Expect(ingress.Spec.TLS[0].Hosts).To(Equal([]string{cahost})) + Expect(ingress.Spec.TLS[1].Hosts).To(Equal([]string{operhost})) + }) + }) + }) +}) diff --git a/pkg/offering/k8s/ca/override/ingressv1beta1.go b/pkg/offering/k8s/ca/override/ingressv1beta1.go new file mode 100644 index 00000000..bbe4d422 --- /dev/null +++ b/pkg/offering/k8s/ca/override/ingressv1beta1.go @@ -0,0 +1,107 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package override + +import ( + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + networkingv1beta1 "k8s.io/api/networking/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func (o *Override) Ingressv1beta1(object v1.Object, ingress *networkingv1beta1.Ingress, action resources.Action) error { + instance := object.(*current.IBPCA) + + switch action { + case resources.Create: + return o.CreateIngressv1beta1(instance, ingress) + case resources.Update: + return o.UpdateIngressv1beta1(instance, ingress) + } + + return nil +} + +func (o *Override) CreateIngressv1beta1(instance *current.IBPCA, ingress *networkingv1beta1.Ingress) error { + return o.CommonIngressv1beta1(instance, ingress) +} + +func (o *Override) UpdateIngressv1beta1(instance *current.IBPCA, ingress *networkingv1beta1.Ingress) error { + return o.CommonIngressv1beta1(instance, ingress) +} + +func (o *Override) CommonIngressv1beta1(instance *current.IBPCA, ingress *networkingv1beta1.Ingress) error { + + ingressClass := "nginx" + if instance.Spec.Ingress.Class != "" { + ingressClass = instance.Spec.Ingress.Class + } + ingress.ObjectMeta.Annotations["kubernetes.io/ingress.class"] = ingressClass + + apihost := instance.Namespace + "-" + instance.Name + "-ca" + "." + instance.Spec.Domain + operationshost := instance.Namespace + "-" + instance.Name + "-operations" + "." + instance.Spec.Domain + + ingress.Spec = networkingv1beta1.IngressSpec{ + Rules: []networkingv1beta1.IngressRule{ + networkingv1beta1.IngressRule{ + Host: apihost, + IngressRuleValue: networkingv1beta1.IngressRuleValue{ + HTTP: &networkingv1beta1.HTTPIngressRuleValue{ + Paths: []networkingv1beta1.HTTPIngressPath{ + networkingv1beta1.HTTPIngressPath{ + Backend: networkingv1beta1.IngressBackend{ + ServiceName: instance.GetName(), + ServicePort: intstr.FromString("http"), + }, + Path: "/", + }, + }, + }, + }, + }, + networkingv1beta1.IngressRule{ + Host: operationshost, + IngressRuleValue: networkingv1beta1.IngressRuleValue{ + HTTP: &networkingv1beta1.HTTPIngressRuleValue{ + Paths: []networkingv1beta1.HTTPIngressPath{ + networkingv1beta1.HTTPIngressPath{ + Backend: networkingv1beta1.IngressBackend{ + ServiceName: instance.GetName(), + ServicePort: intstr.FromString("operations"), + }, + Path: "/", + }, + }, + }, + }, + }, + }, + TLS: []networkingv1beta1.IngressTLS{ + networkingv1beta1.IngressTLS{ + Hosts: []string{apihost}, + }, + networkingv1beta1.IngressTLS{ + Hosts: []string{operationshost}, + }, + }, + } + + return nil +} diff --git a/pkg/offering/k8s/ca/override/ingressv1beta1_test.go b/pkg/offering/k8s/ca/override/ingressv1beta1_test.go new file mode 100644 index 00000000..5947d540 --- /dev/null +++ b/pkg/offering/k8s/ca/override/ingressv1beta1_test.go @@ -0,0 +1,105 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + networkingv1beta1 "k8s.io/api/networking/v1beta1" + "k8s.io/apimachinery/pkg/util/intstr" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/k8s/ca/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" +) + +var _ = Describe("K8s CA Ingress Overrides", func() { + var ( + err error + overrider *override.Override + instance *current.IBPCA + ingress *networkingv1beta1.Ingress + cahost string + operhost string + ) + + BeforeEach(func() { + overrider = &override.Override{} + instance = ¤t.IBPCA{ + Spec: current.IBPCASpec{ + Domain: "test.domain", + }, + } + ingress, err = util.GetIngressv1beta1FromFile("../../../../../definitions/ca/ingressv1beta1.yaml") + Expect(err).NotTo(HaveOccurred()) + + cahost = instance.Namespace + "-" + instance.Name + "-ca" + "." + instance.Spec.Domain + operhost = instance.Namespace + "-" + instance.Name + "-operations" + "." + instance.Spec.Domain + }) + + Context("create", func() { + It("appropriately overrides the respective values for ingress", func() { + err := overrider.Ingressv1beta1(instance, ingress, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + By("setting rule", func() { + Expect(ingress.Spec.Rules).To(HaveLen(2)) + Expect(ingress.Spec.Rules[0]).To(Equal(networkingv1beta1.IngressRule{ + Host: cahost, + IngressRuleValue: networkingv1beta1.IngressRuleValue{ + HTTP: &networkingv1beta1.HTTPIngressRuleValue{ + Paths: []networkingv1beta1.HTTPIngressPath{ + networkingv1beta1.HTTPIngressPath{ + Backend: networkingv1beta1.IngressBackend{ + ServiceName: instance.GetName(), + ServicePort: intstr.FromString("http"), + }, + Path: "/", + }, + }, + }, + }, + })) + Expect(ingress.Spec.Rules[1]).To(Equal(networkingv1beta1.IngressRule{ + Host: operhost, + IngressRuleValue: networkingv1beta1.IngressRuleValue{ + HTTP: &networkingv1beta1.HTTPIngressRuleValue{ + Paths: []networkingv1beta1.HTTPIngressPath{ + networkingv1beta1.HTTPIngressPath{ + Backend: networkingv1beta1.IngressBackend{ + ServiceName: instance.GetName(), + ServicePort: intstr.FromString("operations"), + }, + Path: "/", + }, + }, + }, + }, + })) + }) + + By("setting TLS hosts", func() { + Expect(ingress.Spec.TLS).To(HaveLen(2)) + Expect(ingress.Spec.TLS[0].Hosts).To(Equal([]string{cahost})) + Expect(ingress.Spec.TLS[1].Hosts).To(Equal([]string{operhost})) + }) + }) + }) +}) diff --git a/pkg/offering/k8s/ca/override/override.go b/pkg/offering/k8s/ca/override/override.go new file mode 100644 index 00000000..d15aa416 --- /dev/null +++ b/pkg/offering/k8s/ca/override/override.go @@ -0,0 +1,27 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override + +import ( + baseca "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/ca/override" +) + +type Override struct { + baseca.Override +} diff --git a/pkg/offering/k8s/ca/override/override_suite_test.go b/pkg/offering/k8s/ca/override/override_suite_test.go new file mode 100644 index 00000000..fa47c9b8 --- /dev/null +++ b/pkg/offering/k8s/ca/override/override_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestOverride(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Override Suite") +} diff --git a/pkg/offering/k8s/ca/override/override_test.go b/pkg/offering/k8s/ca/override/override_test.go new file mode 100644 index 00000000..a1541404 --- /dev/null +++ b/pkg/offering/k8s/ca/override/override_test.go @@ -0,0 +1,145 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/k8s/ca/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + networkingv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var _ = Describe("K8S CA Overrides", func() { + var ( + overrider *override.Override + instance *current.IBPCA + ) + + BeforeEach(func() { + overrider = &override.Override{} + }) + + Context("Ingress", func() { + var ( + ingress *networkingv1.Ingress + ) + + BeforeEach(func() { + var err error + + ingress, err = util.GetIngressFromFile("../../../../../definitions/ca/ingress.yaml") + Expect(err).NotTo(HaveOccurred()) + + instance = ¤t.IBPCA{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ingress1", + Namespace: "namespace1", + }, + Spec: current.IBPCASpec{ + Domain: "domain1", + }, + } + }) + + When("creating ingress", func() { + It("sets appropriate values", func() { + err := overrider.Ingress(instance, ingress, resources.Create) + Expect(err).NotTo(HaveOccurred()) + VerifyIngressCommonOverrides(instance, ingress) + }) + }) + + When("creating ingress with custom ingress class", func() { + It("sets appropriate values", func() { + instance.Spec.Ingress = current.Ingress{ + Class: "custom", + } + err := overrider.Ingress(instance, ingress, resources.Create) + Expect(err).NotTo(HaveOccurred()) + VerifyIngressCommonOverrides(instance, ingress) + }) + }) + + When("updating ingress", func() { + It("sets appropriate values", func() { + err := overrider.Ingress(instance, ingress, resources.Update) + Expect(err).NotTo(HaveOccurred()) + VerifyIngressCommonOverrides(instance, ingress) + }) + }) + + When("updating ingress with custom ingress class", func() { + It("sets appropriate values", func() { + instance.Spec.Ingress = current.Ingress{ + Class: "custom", + } + err := overrider.Ingress(instance, ingress, resources.Update) + Expect(err).NotTo(HaveOccurred()) + VerifyIngressCommonOverrides(instance, ingress) + }) + }) + }) +}) + +func VerifyIngressCommonOverrides(instance *current.IBPCA, ingress *networkingv1.Ingress) { + By("setting annotation for custom ingress class", func() { + if instance.Spec.Ingress.Class != "" { + Expect(ingress.ObjectMeta.Annotations["kubernetes.io/ingress.class"]).To(Equal(instance.Spec.Ingress.Class)) + } else { + Expect(ingress.ObjectMeta.Annotations["kubernetes.io/ingress.class"]).To(Equal("nginx")) + } + }) + + By("setting api host in rules host", func() { + Expect(ingress.Spec.Rules[0].Host).To(Equal(instance.Namespace + "-" + instance.Name + "-ca" + "." + instance.Spec.Domain)) + }) + + By("setting api tls host", func() { + Expect(ingress.Spec.TLS[0].Hosts).To(Equal([]string{instance.Namespace + "-" + instance.Name + "-ca" + "." + instance.Spec.Domain})) + }) + + By("setting backend service name", func() { + Expect(ingress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name).To(Equal(instance.Name)) + }) + + By("setting backend service port", func() { + Expect(ingress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Port.Name).To(Equal("http")) + }) + + By("setting operations host in rules host", func() { + Expect(ingress.Spec.Rules[1].Host).To(Equal(instance.Namespace + "-" + instance.Name + "-operations" + "." 
+ instance.Spec.Domain)) + }) + + By("setting operations tls host", func() { + Expect(ingress.Spec.TLS[1].Hosts).To(Equal([]string{instance.Namespace + "-" + instance.Name + "-operations" + "." + instance.Spec.Domain})) + }) + + By("setting backend service name", func() { + Expect(ingress.Spec.Rules[1].HTTP.Paths[0].Backend.Service.Name).To(Equal(instance.Name)) + }) + + By("setting backend service port", func() { + Expect(ingress.Spec.Rules[1].HTTP.Paths[0].Backend.Service.Port.Name).To(Equal("operations")) + }) +} diff --git a/pkg/offering/k8s/console/console.go b/pkg/offering/k8s/console/console.go new file mode 100644 index 00000000..aec134f5 --- /dev/null +++ b/pkg/offering/k8s/console/console.go @@ -0,0 +1,200 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package k8sconsole + +import ( + "context" + "fmt" + "strings" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + resourcemanager "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/manager" + baseconsole "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/console" + baseconsoleoverride "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/console/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/k8s/console/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + "github.com/IBM-Blockchain/fabric-operator/version" + "github.com/pkg/errors" + networkingv1 "k8s.io/api/networking/v1" + networkingv1beta1 "k8s.io/api/networking/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var log = logf.Log.WithName("k8s_console") + +type Override interface { + baseconsole.Override + Ingress(v1.Object, *networkingv1.Ingress, resources.Action) error + Ingressv1beta1(v1.Object, *networkingv1beta1.Ingress, resources.Action) error +} + +type Console struct { + *baseconsole.Console + + IngressManager resources.Manager + Ingressv1beta1Manager resources.Manager + + Override Override +} + +func New(client k8sclient.Client, scheme *runtime.Scheme, config *config.Config) *Console { + o := &override.Override{ + Override: baseconsoleoverride.Override{}, + } + + console := &Console{ + Console: baseconsole.New(client, scheme, config, o), + Override: o, + } + + console.CreateManagers() + return console +} + +func (c *Console) CreateManagers() { + override := c.Override + resourceManager := resourcemanager.New(c.Client, c.Scheme) + c.IngressManager = resourceManager.CreateIngressManager("", 
override.Ingress, c.GetLabels, c.Config.ConsoleInitConfig.IngressFile) + c.Ingressv1beta1Manager = resourceManager.CreateIngressv1beta1Manager("", override.Ingressv1beta1, c.GetLabels, c.Config.ConsoleInitConfig.Ingressv1beta1File) +} + +func (c *Console) Reconcile(instance *current.IBPConsole, update baseconsole.Update) (common.Result, error) { + var err error + + versionSet, err := c.SetVersion(instance) + if err != nil { + return common.Result{}, errors.Wrap(err, fmt.Sprintf("failed updating CR '%s' to version '%s'", instance.Name, version.Operator)) + } + if versionSet { + log.Info("Instance version updated, requeuing request...") + return common.Result{ + Result: reconcile.Result{ + Requeue: true, + }, + }, nil + } + + instanceUpdated, err := c.PreReconcileChecks(instance) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed pre reconcile checks") + } + + if instanceUpdated { + log.Info("Updating instance after pre reconcile checks") + err := c.Client.Patch(context.TODO(), instance, nil, k8sclient.PatchOption{ + Resilient: &k8sclient.ResilientPatch{ + Retry: 3, + Into: ¤t.IBPConsole{}, + Strategy: client.MergeFrom, + }, + }) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to update instance") + } + + log.Info("Instance updated, requeuing request...") + return common.Result{ + Result: reconcile.Result{ + Requeue: true, + }, + }, nil + } + + err = c.ReconcileManagers(instance, update.SpecUpdated()) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to reconcile managers") + } + + err = c.CheckStates(instance, update.SpecUpdated()) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to check and restore state") + } + + err = c.CheckForConfigMapUpdates(instance, update) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to check for config map updates") + } + + err = c.HandleActions(instance, update) + if err != nil { + return common.Result{}, err + } + + if err := c.HandleRestart(instance, update); err != nil { + return common.Result{}, err + } + + return common.Result{}, nil +} + +func (c *Console) ReconcileManagers(instance *current.IBPConsole, update bool) error { + var err error + + err = c.Console.ReconcileManagers(instance, update) + if err != nil { + return err + } + + err = c.ReconcileIngressManager(instance, update) + if err != nil { + return err + } + + err = c.NetworkPolicyReconcile(instance) + if err != nil { + return errors.Wrap(err, "failed Network Policy reconciliation") + } + + return nil +} + +func (c *Console) ReconcileIngressManager(instance *current.IBPConsole, update bool) error { + if c.Config.Operator.Globals.AllowKubernetesEighteen == "true" { + // check k8s version + version, err := util.GetServerVersion() + if err != nil { + return err + } + if strings.Compare(version.Minor, "19") < 0 { // v1beta + err = c.Ingressv1beta1Manager.Reconcile(instance, update) + if err != nil { + return errors.Wrap(err, "failed Ingressv1beta1 reconciliation") + } + } else { + err = c.IngressManager.Reconcile(instance, update) + if err != nil { + return errors.Wrap(err, "failed Ingress reconciliation") + } + } + } else { + err := c.IngressManager.Reconcile(instance, update) + if err != nil { + return errors.Wrap(err, "failed Ingress reconciliation") + } + } + return nil +} diff --git a/pkg/offering/k8s/console/console_suite_test.go b/pkg/offering/k8s/console/console_suite_test.go new file mode 100644 index 00000000..9235e8e1 --- /dev/null +++ b/pkg/offering/k8s/console/console_suite_test.go @@ 
-0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package k8sconsole_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestConsole(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Console Suite") +} diff --git a/pkg/offering/k8s/console/console_test.go b/pkg/offering/k8s/console/console_test.go new file mode 100644 index 00000000..d75a218e --- /dev/null +++ b/pkg/offering/k8s/console/console_test.go @@ -0,0 +1,286 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package k8sconsole_test + +import ( + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + managermocks "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/mocks" + baseconsole "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/console" + baseconsolemocks "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/console/mocks" + k8sconsole "github.com/IBM-Blockchain/fabric-operator/pkg/offering/k8s/console" + "github.com/IBM-Blockchain/fabric-operator/version" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/runtime" +) + +var _ = Describe("K8s Console", func() { + var ( + console *k8sconsole.Console + instance *current.IBPConsole + mockKubeClient *mocks.Client + + deploymentMgr *managermocks.ResourceManager + serviceMgr *managermocks.ResourceManager + pvcMgr *managermocks.ResourceManager + configMapMgr *managermocks.ResourceManager + consoleConfigMapMgr *managermocks.ResourceManager + deployerConfigMapMgr *managermocks.ResourceManager + roleMgr *managermocks.ResourceManager + roleBindingMgr *managermocks.ResourceManager + serviceAccountMgr *managermocks.ResourceManager + ingressMgr *managermocks.ResourceManager + ingressv1beta1Mgr *managermocks.ResourceManager + update *baseconsolemocks.Update + ) + + BeforeEach(func() { + mockKubeClient = &mocks.Client{} + update = &baseconsolemocks.Update{} + + deploymentMgr = &managermocks.ResourceManager{} + serviceMgr = &managermocks.ResourceManager{} + pvcMgr = &managermocks.ResourceManager{} + configMapMgr = &managermocks.ResourceManager{} + consoleConfigMapMgr = &managermocks.ResourceManager{} + deployerConfigMapMgr = &managermocks.ResourceManager{} + roleMgr = &managermocks.ResourceManager{} + roleBindingMgr = &managermocks.ResourceManager{} + serviceAccountMgr = &managermocks.ResourceManager{} + ingressMgr = &managermocks.ResourceManager{} + ingressv1beta1Mgr = &managermocks.ResourceManager{} + + instance = ¤t.IBPConsole{ + Spec: current.IBPConsoleSpec{ + License: current.License{ + Accept: true, + }, + ServiceAccountName: "test", + AuthScheme: "couchdb", + DeployerTimeout: 30000, + Components: "athena-components", + Sessions: "athena-sessions", + System: "athena-system", + Service: ¤t.Service{}, + Email: "xyz@ibm.com", + Password: "cGFzc3dvcmQ=", + SystemChannel: "testchainid", + ImagePullSecrets: []string{"testsecret"}, + RegistryURL: "ghcr.io/ibm-blockchain/", + Kubeconfig: &[]byte{}, + Images: ¤t.ConsoleImages{ + ConsoleInitImage: "fake-init-image", + ConsoleInitTag: "1234", + CouchDBImage: "fake-couchdb-image", + CouchDBTag: "1234", + ConsoleImage: "fake-console-image", + ConsoleTag: "1234", + ConfigtxlatorImage: "fake-configtxlator-image", + ConfigtxlatorTag: "1234", + DeployerImage: "fake-deployer-image", + DeployerTag: "1234", + }, + NetworkInfo: ¤t.NetworkInfo{ + Domain: "test.domain", + ConsolePort: 31010, + ProxyPort: 31011, + }, + TLSSecretName: "secret", + Resources: ¤t.ConsoleResources{}, + Storage: ¤t.ConsoleStorage{ + Console: ¤t.StorageSpec{ + Size: "100m", + Class: "manual", + }, + }, + PasswordSecretName: "password", + Versions: ¤t.Versions{}, + ConnectionString: "https://localhost", + }, + } + instance.Kind = "IBPConsole" + instance.Status.Version = version.Operator + + console = &k8sconsole.Console{ + Console: &baseconsole.Console{ + Client: mockKubeClient, + Scheme: &runtime.Scheme{}, + Config: &config.Config{}, + + DeploymentManager: deploymentMgr, + ServiceManager: serviceMgr, + PVCManager: pvcMgr, + ConfigMapManager: configMapMgr, + ConsoleConfigMapManager: consoleConfigMapMgr, + DeployerConfigMapManager: deployerConfigMapMgr, + RoleManager: roleMgr, + RoleBindingManager: roleBindingMgr, + ServiceAccountManager: serviceAccountMgr, + Restart: &baseconsolemocks.RestartManager{}, + }, + IngressManager: ingressMgr, + Ingressv1beta1Manager: ingressv1beta1Mgr, + } + }) + + Context("Reconciles", func() { + It("returns an error if pvc manager fails to reconcile", func() { + pvcMgr.ReconcileReturns(errors.New("failed to reconcile pvc")) + _, err := 
console.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed PVC reconciliation: failed to reconcile pvc")) + }) + + It("returns an error if service manager fails to reconcile", func() { + serviceMgr.ReconcileReturns(errors.New("failed to reconcile service")) + _, err := console.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed Service reconciliation: failed to reconcile service")) + }) + + It("returns an error if deployment manager fails to reconcile", func() { + deploymentMgr.ReconcileReturns(errors.New("failed to reconcile deployment")) + _, err := console.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed Deployment reconciliation: failed to reconcile deployment")) + }) + + It("returns an error if role manager fails to reconcile", func() { + roleMgr.ReconcileReturns(errors.New("failed to reconcile role")) + _, err := console.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed RBAC reconciliation: failed to reconcile role")) + }) + + It("returns an error if role binding manager fails to reconcile", func() { + roleBindingMgr.ReconcileReturns(errors.New("failed to reconcile role binding")) + _, err := console.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed RBAC reconciliation: failed to reconcile role binding")) + }) + + It("returns an error if service account manager fails to reconcile", func() { + serviceAccountMgr.ReconcileReturns(errors.New("failed to reconcile service account")) + _, err := console.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed RBAC reconciliation: failed to reconcile service account")) + }) + + It("returns an error if config map manager fails to reconcile", func() { + configMapMgr.ReconcileReturns(errors.New("failed to reconcile config map")) + _, err := console.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed ConfigMap reconciliation: failed to reconcile config map")) + }) + + It("returns an error if console config map manager fails to reconcile", func() { + consoleConfigMapMgr.ReconcileReturns(errors.New("failed to reconcile config map")) + _, err := console.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed Console ConfigMap reconciliation: failed to reconcile config map")) + }) + + It("returns an error if deployer config map manager fails to reconcile", func() { + deployerConfigMapMgr.ReconcileReturns(errors.New("failed to reconcile config map")) + _, err := console.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed Deployer ConfigMap reconciliation: failed to reconcile config map")) + }) + + It("returns an error if ingress manager fails to reconcile", func() { + ingressMgr.ReconcileReturns(errors.New("failed to reconcile ingress")) + _, err := console.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed Ingress reconciliation: failed to reconcile ingress")) + }) + + It("restarts pods by 
deleting deployment", func() { + update.RestartNeededReturns(true) + _, err := console.Reconcile(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(mockKubeClient.PatchCallCount()).To(Equal(1)) + }) + + It("does not return an error on a successful reconcile", func() { + _, err := console.Reconcile(instance, update) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("ValidateSpec", func() { + It("returns no error if valid spec is passed", func() { + err := console.ValidateSpec(instance) + Expect(err).NotTo(HaveOccurred()) + }) + + It("returns error if license is not accepted", func() { + instance.Spec.License.Accept = false + err := console.ValidateSpec(instance) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("user must accept license before continuing")) + }) + + It("returns error if serviceaccountname is not passed", func() { + instance.Spec.ServiceAccountName = "" + err := console.ValidateSpec(instance) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("Service account name not provided")) + }) + + It("returns error if email is not passed", func() { + instance.Spec.Email = "" + err := console.ValidateSpec(instance) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("email not provided")) + }) + + It("returns error if password & passwordsecret are not passed", func() { + instance.Spec.PasswordSecretName = "" + instance.Spec.Password = "" + err := console.ValidateSpec(instance) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("password and passwordSecretName both not provided, at least one expected")) + }) + + It("should not return error if password & passwordsecret are not passed when authscheme is ibmid", func() { + instance.Spec.AuthScheme = "ibmid" + instance.Spec.PasswordSecretName = "" + instance.Spec.Password = "" + err := console.ValidateSpec(instance) + Expect(err).ToNot(HaveOccurred()) + }) + + It("returns error if imagepullsecret is not passed", func() { + instance.Spec.ImagePullSecrets = nil + err := console.ValidateSpec(instance) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("imagepullsecrets required")) + }) + + It("returns error if ingress info are not passed", func() { + instance.Spec.NetworkInfo = nil + err := console.ValidateSpec(instance) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("network information not provided")) + }) + }) +}) diff --git a/pkg/offering/k8s/console/override/consolecm.go b/pkg/offering/k8s/console/override/consolecm.go new file mode 100644 index 00000000..dbd1bc12 --- /dev/null +++ b/pkg/offering/k8s/console/override/consolecm.go @@ -0,0 +1,78 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package override + +import ( + "errors" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + consolev1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/console/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + baseconsole "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/console/override" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/yaml" +) + +func (o *Override) ConsoleCM(object v1.Object, cm *corev1.ConfigMap, action resources.Action, options map[string]interface{}) error { + instance := object.(*current.IBPConsole) + switch action { + case resources.Create: + return o.CreateConsoleCM(instance, cm, options) + case resources.Update: + return o.UpdateConsoleCM(instance, cm, options) + } + + return nil +} + +func (o *Override) CreateConsoleCM(instance *current.IBPConsole, cm *corev1.ConfigMap, options map[string]interface{}) error { + data := cm.Data["settings.yaml"] + + config := &consolev1.ConsoleSettingsConfig{} + err := yaml.Unmarshal([]byte(data), config) + if err != nil { + return err + } + + if instance.Spec.NetworkInfo == nil || instance.Spec.NetworkInfo.Domain == "" { + return errors.New("domain not provided") + } + + err = baseconsole.CommonConsoleCM(instance, config, options) + if err != nil { + return err + } + + config.Infrastructure = baseconsole.K8S + // config.ProxyTLSUrl = "https://" + instance.Namespace + "-" + instance.Name + "-console." + instance.Spec.NetworkInfo.Domain + ":443" + + bytes, err := yaml.Marshal(config) + if err != nil { + return err + } + + if cm.Data == nil { + cm.Data = map[string]string{} + } + + cm.Data["settings.yaml"] = string(bytes) + + return nil +} diff --git a/pkg/offering/k8s/console/override/consolecm_test.go b/pkg/offering/k8s/console/override/consolecm_test.go new file mode 100644 index 00000000..0d00b540 --- /dev/null +++ b/pkg/offering/k8s/console/override/consolecm_test.go @@ -0,0 +1,161 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/yaml" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + consolev1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/console/v1" + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/console/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/k8s/console/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" +) + +var _ = Describe("K8S Console Config Map Overrides", func() { + var ( + overrider *override.Override + instance *current.IBPConsole + cm *corev1.ConfigMap + ) + + BeforeEach(func() { + var err error + + cm, err = util.GetConfigMapFromFile("../../../../../definitions/console/console-configmap.yaml") + Expect(err).NotTo(HaveOccurred()) + + overrider = &override.Override{} + instance = ¤t.IBPConsole{ + ObjectMeta: metav1.ObjectMeta{ + Name: "consolecm", + Namespace: "consolecmns", + }, + Spec: current.IBPConsoleSpec{ + Email: "test@ibm.com", + AuthScheme: "scheme1", + ConfigtxlatorURL: "configtx.ibm.com", + DeployerURL: "deployer.ibm.com", + DeployerTimeout: 5, + Components: "component1", + Sessions: "session1", + System: "system1", + SystemChannel: "channel1", + FeatureFlags: &consolev1.FeatureFlags{ + CreateChannelEnabled: true, + }, + ClusterData: &consolev1.IBPConsoleClusterData{ + Zones: []string{"zone1"}, + Type: "type1", + }, + NetworkInfo: ¤t.NetworkInfo{ + Domain: "ibm.com", + }, + }, + } + }) + + Context("create", func() { + It("returns an error if domain not provided", func() { + instance.Spec.NetworkInfo.Domain = "" + err := overrider.ConsoleCM(instance, cm, resources.Create, nil) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("domain not provided")) + }) + + It("overrides values based on spec", func() { + err := overrider.ConsoleCM(instance, cm, resources.Create, nil) + Expect(err).NotTo(HaveOccurred()) + + config := &v1.ConsoleSettingsConfig{} + err = yaml.Unmarshal([]byte(cm.Data["settings.yaml"]), config) + Expect(err).NotTo(HaveOccurred()) + + CommonConsoleCMOverrides(instance, config) + }) + }) + + Context("update", func() { + It("overrides values based on spec", func() { + err := overrider.ConsoleCM(instance, cm, resources.Update, nil) + Expect(err).NotTo(HaveOccurred()) + + config := &v1.ConsoleSettingsConfig{} + err = yaml.Unmarshal([]byte(cm.Data["settings.yaml"]), config) + Expect(err).NotTo(HaveOccurred()) + + CommonConsoleCMOverrides(instance, config) + }) + }) +}) + +func CommonConsoleCMOverrides(instance *current.IBPConsole, config *v1.ConsoleSettingsConfig) { + By("setting email", func() { + Expect(config.Email).To(Equal(instance.Spec.Email)) + }) + + By("setting auth scheme", func() { + Expect(config.AuthScheme).To(Equal(instance.Spec.AuthScheme)) + }) + + By("setting configtxlator URL", func() { + Expect(config.Configtxlator).To(Equal(instance.Spec.ConfigtxlatorURL)) + }) + + By("setting Deployer URL", func() { + Expect(config.DeployerURL).To(Equal(instance.Spec.DeployerURL)) + }) + + By("setting Deployer timeout", func() { + Expect(config.DeployerTimeout).To(Equal(instance.Spec.DeployerTimeout)) + }) + + By("setting components", func() { + Expect(config.DBCustomNames.Components).To(Equal(instance.Spec.Components)) + }) + + By("setting sessions", func() { + Expect(config.DBCustomNames.Sessions).To(Equal(instance.Spec.Sessions)) + }) + + By("setting system", func() { + 
Expect(config.DBCustomNames.System).To(Equal(instance.Spec.System)) + }) + + By("setting system channel", func() { + Expect(config.SystemChannelID).To(Equal(instance.Spec.SystemChannel)) + }) + + By("setting Proxy TLS Reqs", func() { + Expect(config.ProxyTLSReqs).To(Equal("always")) + }) + + By("settings feature flags", func() { + Expect(config.Featureflags).To(Equal(instance.Spec.FeatureFlags)) + }) + + By("settings cluster data", func() { + Expect(config.ClusterData).To(Equal(instance.Spec.ClusterData)) + }) +} diff --git a/pkg/offering/k8s/console/override/deployercm.go b/pkg/offering/k8s/console/override/deployercm.go new file mode 100644 index 00000000..b103999e --- /dev/null +++ b/pkg/offering/k8s/console/override/deployercm.go @@ -0,0 +1,79 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override + +import ( + "errors" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/deployer" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering" + baseconsole "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/console/override" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/yaml" +) + +func (o *Override) DeployerCM(object v1.Object, cm *corev1.ConfigMap, action resources.Action, options map[string]interface{}) error { + instance := object.(*current.IBPConsole) + switch action { + case resources.Create: + return o.CreateDeployerCM(instance, cm, options) + case resources.Update: + return o.UpdateDeployerCM(instance, cm, options) + } + + return nil +} + +func (o *Override) CreateDeployerCM(instance *current.IBPConsole, cm *corev1.ConfigMap, options map[string]interface{}) error { + data := cm.Data["settings.yaml"] + + config := &deployer.Config{} + err := yaml.Unmarshal([]byte(data), config) + if err != nil { + return err + } + + if instance.Spec.NetworkInfo == nil || instance.Spec.NetworkInfo.Domain == "" { + return errors.New("domain not provided") + } + + err = baseconsole.CommonDeployerCM(instance, config, options) + if err != nil { + return err + } + + config.ClusterType = offering.K8S.String() + config.ServiceConfig.Type = corev1.ServiceTypeClusterIP + + bytes, err := yaml.Marshal(config) + if err != nil { + return err + } + + if cm.Data == nil { + cm.Data = map[string]string{} + } + + cm.Data["settings.yaml"] = string(bytes) + + return nil +} diff --git a/pkg/offering/k8s/console/override/deployercm_test.go b/pkg/offering/k8s/console/override/deployercm_test.go new file mode 100644 index 00000000..928798ea --- /dev/null +++ b/pkg/offering/k8s/console/override/deployercm_test.go @@ -0,0 +1,285 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, 
Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + "fmt" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/yaml" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/deployer" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/k8s/console/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" +) + +var _ = Describe("K8S Console Deployer Config Map Overrides", func() { + var ( + overrider *override.Override + instance *current.IBPConsole + cm *corev1.ConfigMap + ) + + BeforeEach(func() { + var err error + overrider = &override.Override{} + instance = ¤t.IBPConsole{ + Spec: current.IBPConsoleSpec{ + RegistryURL: "us.test.io/", + ImagePullSecrets: []string{"pullsecret"}, + ConnectionString: "connectionString1", + Storage: ¤t.ConsoleStorage{ + Console: ¤t.StorageSpec{ + Class: "sc1", + }, + }, + NetworkInfo: ¤t.NetworkInfo{ + Domain: "domain1", + }, + Versions: ¤t.Versions{ + CA: map[string]current.VersionCA{ + "1.4.6-1": current.VersionCA{ + Default: true, + Version: "1.4.6-1", + Image: current.CAImages{ + CAInitImage: "ca-init-image", + CAInitTag: "1.4.6", + CAImage: "ca-image", + CATag: "1.4.6", + }, + }, + }, + Peer: map[string]current.VersionPeer{ + "1.4.6-1": current.VersionPeer{ + Default: true, + Version: "1.4.6-1", + Image: current.PeerImages{ + PeerInitImage: "peer-init-image", + PeerInitTag: "1.4.6", + PeerImage: "peer-image", + PeerTag: "1.4.6", + DindImage: "dind-iamge", + DindTag: "1.4.6", + GRPCWebImage: "grpcweb-image", + GRPCWebTag: "1.4.6", + FluentdImage: "fluentd-image", + FluentdTag: "1.4.6", + CouchDBImage: "couchdb-image", + CouchDBTag: "1.4.6", + }, + }, + "2.1.0-1": current.VersionPeer{ + Default: true, + Version: "2.1.0-1", + Image: current.PeerImages{ + CCLauncherImage: "cclauncer-image", + CCLauncherTag: "1.4.6", + BuilderImage: "ibp-ccenv", + BuilderTag: "2.1.0-20200505", + GoEnvImage: "ibp-goenv", + GoEnvTag: "2.1.0-20200505", + JavaEnvImage: "ibp-javaenv", + JavaEnvTag: "2.1.0-20200505", + NodeEnvImage: "ibp-nodeenv", + NodeEnvTag: "2.1.0-20200505", + }, + }, + }, + Orderer: map[string]current.VersionOrderer{ + "1.4.6-1": current.VersionOrderer{ + Default: true, + Version: "1.4.6-1", + Image: current.OrdererImages{ + OrdererInitImage: "orderer-init-image", + OrdererInitTag: "1.4.6", + OrdererImage: "orderer-image", + OrdererTag: "1.4.6", + GRPCWebImage: "grpcweb-image", + GRPCWebTag: "1.4.6", + }, + }, + }, + }, + CRN: ¤t.CRN{ + CName: "cname", + CType: "ctype", + Location: "location1", + Servicename: "Servicename1", + Version: "version1", + AccountID: "id123", + }, + Deployer: ¤t.Deployer{ + ConnectionString: "connectionstring2", + }, + }, + } + cm, err = util.GetConfigMapFromFile("../../../../../testdata/deployercm/deployer-configmap.yaml") + Expect(err).NotTo(HaveOccurred()) + }) + + 
Context("create", func() { + It("return an error if no image pull secret provided", func() { + instance.Spec.ImagePullSecrets = nil + err := overrider.DeployerCM(instance, cm, resources.Create, nil) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("no image pull secret provided")) + }) + + It("return an error if no domain provided", func() { + instance.Spec.NetworkInfo.Domain = "" + err := overrider.DeployerCM(instance, cm, resources.Create, nil) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("domain not provided")) + }) + + It("overrides values based on spec", func() { + err := overrider.DeployerCM(instance, cm, resources.Create, nil) + Expect(err).NotTo(HaveOccurred()) + + config := &deployer.Config{} + + err = yaml.Unmarshal([]byte(cm.Data["settings.yaml"]), config) + Expect(err).NotTo(HaveOccurred()) + + By("setting cluster type", func() { + Expect(config.ClusterType).To(Equal(offering.K8S.String())) + }) + + By("setting service type", func() { + Expect(config.ServiceConfig.Type).To(Equal(corev1.ServiceTypeClusterIP)) + }) + + By("setting domain", func() { + Expect(config.Domain).To(Equal(instance.Spec.NetworkInfo.Domain)) + }) + + By("setting image pull secret", func() { + Expect(config.ImagePullSecrets).To(Equal(instance.Spec.ImagePullSecrets)) + }) + + By("setting connection string", func() { + Expect(config.Database.ConnectionURL).To(Equal(instance.Spec.Deployer.ConnectionString)) + }) + + By("setting versions", func() { + expectedVersions := ¤t.Versions{ + CA: map[string]current.VersionCA{ + "1.4.6-1": current.VersionCA{ + Default: true, + Version: "1.4.6-1", + Image: current.CAImages{ + CAInitImage: fmt.Sprintf("%sca-init-image", instance.Spec.RegistryURL), + CAInitTag: "1.4.6", + CAImage: fmt.Sprintf("%sca-image", instance.Spec.RegistryURL), + CATag: "1.4.6", + }, + }, + }, + Peer: map[string]current.VersionPeer{ + "1.4.6-1": current.VersionPeer{ + Default: true, + Version: "1.4.6-1", + Image: current.PeerImages{ + PeerInitImage: fmt.Sprintf("%speer-init-image", instance.Spec.RegistryURL), + PeerInitTag: "1.4.6", + PeerImage: fmt.Sprintf("%speer-image", instance.Spec.RegistryURL), + PeerTag: "1.4.6", + DindImage: fmt.Sprintf("%sdind-iamge", instance.Spec.RegistryURL), + DindTag: "1.4.6", + GRPCWebImage: fmt.Sprintf("%sgrpcweb-image", instance.Spec.RegistryURL), + GRPCWebTag: "1.4.6", + FluentdImage: fmt.Sprintf("%sfluentd-image", instance.Spec.RegistryURL), + FluentdTag: "1.4.6", + CouchDBImage: fmt.Sprintf("%scouchdb-image", instance.Spec.RegistryURL), + CouchDBTag: "1.4.6", + }, + }, + "2.1.0-1": current.VersionPeer{ + Default: true, + Version: "2.1.0-1", + Image: current.PeerImages{ + CCLauncherImage: fmt.Sprintf("%scclauncer-image", instance.Spec.RegistryURL), + CCLauncherTag: "1.4.6", + BuilderImage: fmt.Sprintf("%sibp-ccenv", instance.Spec.RegistryURL), + BuilderTag: "2.1.0-20200505", + GoEnvImage: fmt.Sprintf("%sibp-goenv", instance.Spec.RegistryURL), + GoEnvTag: "2.1.0-20200505", + JavaEnvImage: fmt.Sprintf("%sibp-javaenv", instance.Spec.RegistryURL), + JavaEnvTag: "2.1.0-20200505", + NodeEnvImage: fmt.Sprintf("%sibp-nodeenv", instance.Spec.RegistryURL), + NodeEnvTag: "2.1.0-20200505", + }, + }, + }, + Orderer: map[string]current.VersionOrderer{ + "1.4.6-1": current.VersionOrderer{ + Default: true, + Version: "1.4.6-1", + Image: current.OrdererImages{ + OrdererInitImage: fmt.Sprintf("%sorderer-init-image", instance.Spec.RegistryURL), + OrdererInitTag: "1.4.6", + OrdererImage: fmt.Sprintf("%sorderer-image", instance.Spec.RegistryURL), + 
OrdererTag: "1.4.6", + GRPCWebImage: fmt.Sprintf("%sgrpcweb-image", instance.Spec.RegistryURL), + GRPCWebTag: "1.4.6", + }, + }, + }, + } + + typeConvertedVersions := ¤t.Versions{} + util.ConvertSpec(config.Versions, typeConvertedVersions) + Expect(typeConvertedVersions).To(Equal(expectedVersions)) + }) + + By("setting storage class name", func() { + Expect(config.Defaults.Storage.CA.CA.Class).To(Equal(instance.Spec.Storage.Console.Class)) + Expect(config.Defaults.Storage.Peer.Peer.Class).To(Equal(instance.Spec.Storage.Console.Class)) + Expect(config.Defaults.Storage.Peer.StateDB.Class).To(Equal(instance.Spec.Storage.Console.Class)) + Expect(config.Defaults.Storage.Orderer.Orderer.Class).To(Equal(instance.Spec.Storage.Console.Class)) + }) + + By("setting CRN", func() { + crn := ¤t.CRN{ + CName: instance.Spec.CRN.CName, + CType: instance.Spec.CRN.CType, + Location: instance.Spec.CRN.Location, + Servicename: instance.Spec.CRN.Servicename, + Version: instance.Spec.CRN.Version, + AccountID: instance.Spec.CRN.AccountID, + } + Expect(config.CRN).To(Equal(crn)) + }) + }) + }) + + Context("update", func() { + It("return an error if no image pull secret provided", func() { + instance.Spec.ImagePullSecrets = nil + err := overrider.DeployerCM(instance, cm, resources.Update, nil) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("no image pull secret provided")) + }) + }) +}) diff --git a/pkg/offering/k8s/console/override/envcm.go b/pkg/offering/k8s/console/override/envcm.go new file mode 100644 index 00000000..f023ab48 --- /dev/null +++ b/pkg/offering/k8s/console/override/envcm.go @@ -0,0 +1,49 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override + +import ( + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (o *Override) CM(object v1.Object, cm *corev1.ConfigMap, action resources.Action, options map[string]interface{}) error { + instance := object.(*current.IBPConsole) + switch action { + case resources.Create: + return o.CreateCM(instance, cm) + case resources.Update: + return o.UpdateCM(instance, cm) + } + + return nil +} + +func (o *Override) CreateCM(instance *current.IBPConsole, cm *corev1.ConfigMap) error { + cm.Data["HOST_URL"] = "https://" + instance.Namespace + "-" + instance.Name + "-console" + "." 
+ instance.Spec.NetworkInfo.Domain + ":443" + + err := o.CommonCM(instance, cm) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/offering/k8s/console/override/envcm_test.go b/pkg/offering/k8s/console/override/envcm_test.go new file mode 100644 index 00000000..afa0225a --- /dev/null +++ b/pkg/offering/k8s/console/override/envcm_test.go @@ -0,0 +1,87 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + "fmt" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/k8s/console/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" +) + +var _ = Describe("K8s Console Env Config Map Overrides", func() { + var ( + overrider *override.Override + instance *current.IBPConsole + cm *corev1.ConfigMap + ) + + BeforeEach(func() { + var err error + overrider = &override.Override{} + instance = ¤t.IBPConsole{ + Spec: current.IBPConsoleSpec{ + ConnectionString: "connection_string", + TLSSecretName: "tls_secret_name", + System: "system1", + NetworkInfo: ¤t.NetworkInfo{ + Domain: "test.domain", + ConsolePort: 31010, + ProxyPort: 31011, + }, + }, + } + cm, err = util.GetConfigMapFromFile("../../../../../definitions/console/configmap.yaml") + Expect(err).NotTo(HaveOccurred()) + }) + + Context("create", func() { + It("appropriately overrides the respective values for env config map", func() { + err := overrider.CM(instance, cm, resources.Create, nil) + Expect(err).NotTo(HaveOccurred()) + + By("setting HOST_URL", func() { + consolehost := instance.Namespace + "-" + instance.Name + "-console" + "." + instance.Spec.NetworkInfo.Domain + Expect(cm.Data["HOST_URL"]).To(Equal(fmt.Sprintf("https://%s:443", consolehost))) + }) + + By("setting DB_CONNECTION_STRING", func() { + Expect(cm.Data["DB_CONNECTION_STRING"]).To(Equal(instance.Spec.ConnectionString)) + }) + + By("setting DB_SYSTEM", func() { + Expect(cm.Data["DB_SYSTEM"]).To(Equal(instance.Spec.System)) + }) + + By("setting KEY_FILE_PATH", func() { + Expect(cm.Data["KEY_FILE_PATH"]).To(Equal("/certs/tls/tls.key")) + }) + + By("setting PEM_FILE_PATH", func() { + Expect(cm.Data["PEM_FILE_PATH"]).To(Equal("/certs/tls/tls.crt")) + }) + }) + }) +}) diff --git a/pkg/offering/k8s/console/override/ingress.go b/pkg/offering/k8s/console/override/ingress.go new file mode 100644 index 00000000..34620789 --- /dev/null +++ b/pkg/offering/k8s/console/override/ingress.go @@ -0,0 +1,91 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override + +import ( + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + networkingv1 "k8s.io/api/networking/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (o *Override) Ingress(object v1.Object, ingress *networkingv1.Ingress, action resources.Action) error { + instance := object.(*current.IBPConsole) + + switch action { + case resources.Create: + return o.CreateIngress(instance, ingress) + case resources.Update: + return o.UpdateIngress(instance, ingress) + } + + return nil +} + +func (o *Override) CreateIngress(instance *current.IBPConsole, ingress *networkingv1.Ingress) error { + return o.CommonIngress(instance, ingress) +} + +func (o *Override) UpdateIngress(instance *current.IBPConsole, ingress *networkingv1.Ingress) error { + return o.CommonIngress(instance, ingress) +} + +func (o *Override) CommonIngress(instance *current.IBPConsole, ingress *networkingv1.Ingress) error { + ingressClass := "nginx" + if instance.Spec.Ingress.Class != "" { + ingressClass = instance.Spec.Ingress.Class + } + ingress.ObjectMeta.Annotations["kubernetes.io/ingress.class"] = ingressClass + + consolehost := instance.Namespace + "-" + instance.Name + "-console" + "." + instance.Spec.NetworkInfo.Domain + + pathType := networkingv1.PathTypeImplementationSpecific + ingress.Spec = networkingv1.IngressSpec{ + Rules: []networkingv1.IngressRule{ + networkingv1.IngressRule{ + Host: consolehost, + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ + networkingv1.HTTPIngressPath{ + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: instance.GetName(), + Port: networkingv1.ServiceBackendPort{ + Name: "optools", + }, + }, + }, + Path: "/", + PathType: &pathType, + }, + }, + }, + }, + }, + }, + TLS: []networkingv1.IngressTLS{ + networkingv1.IngressTLS{ + Hosts: []string{consolehost}, + }, + }, + } + + return nil +} diff --git a/pkg/offering/k8s/console/override/ingress_test.go b/pkg/offering/k8s/console/override/ingress_test.go new file mode 100644 index 00000000..13044871 --- /dev/null +++ b/pkg/offering/k8s/console/override/ingress_test.go @@ -0,0 +1,93 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + networkingv1 "k8s.io/api/networking/v1" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/k8s/console/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" +) + +var _ = Describe("K8s Console Ingress Overrides", func() { + var ( + err error + overrider *override.Override + instance *current.IBPConsole + ingress *networkingv1.Ingress + consolehost string + ) + + BeforeEach(func() { + overrider = &override.Override{} + instance = ¤t.IBPConsole{ + Spec: current.IBPConsoleSpec{ + NetworkInfo: ¤t.NetworkInfo{ + Domain: "test.domain", + }, + }, + } + ingress, err = util.GetIngressFromFile("../../../../../definitions/console/ingress.yaml") + Expect(err).NotTo(HaveOccurred()) + + consolehost = instance.Namespace + "-" + instance.Name + "-console" + "." + instance.Spec.NetworkInfo.Domain + }) + + Context("create", func() { + It("appropriately overrides the respective values for ingress", func() { + err := overrider.Ingress(instance, ingress, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + By("setting rule", func() { + pathType := networkingv1.PathTypeImplementationSpecific + Expect(ingress.Spec.Rules).To(HaveLen(1)) + Expect(ingress.Spec.Rules[0]).To(Equal(networkingv1.IngressRule{ + Host: consolehost, + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ + networkingv1.HTTPIngressPath{ + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: instance.GetName(), + Port: networkingv1.ServiceBackendPort{ + Name: "optools", + }, + }, + }, + Path: "/", + PathType: &pathType, + }, + }, + }, + }, + })) + }) + + By("setting TLS hosts", func() { + Expect(ingress.Spec.TLS).To(HaveLen(1)) + Expect(ingress.Spec.TLS[0].Hosts).To(Equal([]string{consolehost})) + }) + }) + }) +}) diff --git a/pkg/offering/k8s/console/override/ingressv1beta1.go b/pkg/offering/k8s/console/override/ingressv1beta1.go new file mode 100644 index 00000000..14c38c24 --- /dev/null +++ b/pkg/offering/k8s/console/override/ingressv1beta1.go @@ -0,0 +1,86 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package override + +import ( + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + networkingv1beta1 "k8s.io/api/networking/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func (o *Override) Ingressv1beta1(object v1.Object, ingress *networkingv1beta1.Ingress, action resources.Action) error { + instance := object.(*current.IBPConsole) + + switch action { + case resources.Create: + return o.CreateIngressv1beta1(instance, ingress) + case resources.Update: + return o.UpdateIngressv1beta1(instance, ingress) + } + + return nil +} + +func (o *Override) CreateIngressv1beta1(instance *current.IBPConsole, ingress *networkingv1beta1.Ingress) error { + return o.CommonIngressv1beta1(instance, ingress) +} + +func (o *Override) UpdateIngressv1beta1(instance *current.IBPConsole, ingress *networkingv1beta1.Ingress) error { + return o.CommonIngressv1beta1(instance, ingress) +} + +func (o *Override) CommonIngressv1beta1(instance *current.IBPConsole, ingress *networkingv1beta1.Ingress) error { + ingressClass := "nginx" + if instance.Spec.Ingress.Class != "" { + ingressClass = instance.Spec.Ingress.Class + } + ingress.ObjectMeta.Annotations["kubernetes.io/ingress.class"] = ingressClass + + consolehost := instance.Namespace + "-" + instance.Name + "-console" + "." + instance.Spec.NetworkInfo.Domain + + ingress.Spec = networkingv1beta1.IngressSpec{ + Rules: []networkingv1beta1.IngressRule{ + networkingv1beta1.IngressRule{ + Host: consolehost, + IngressRuleValue: networkingv1beta1.IngressRuleValue{ + HTTP: &networkingv1beta1.HTTPIngressRuleValue{ + Paths: []networkingv1beta1.HTTPIngressPath{ + networkingv1beta1.HTTPIngressPath{ + Backend: networkingv1beta1.IngressBackend{ + ServiceName: instance.GetName(), + ServicePort: intstr.FromString("optools"), + }, + Path: "/", + }, + }, + }, + }, + }, + }, + TLS: []networkingv1beta1.IngressTLS{ + networkingv1beta1.IngressTLS{ + Hosts: []string{consolehost}, + }, + }, + } + + return nil +} diff --git a/pkg/offering/k8s/console/override/ingressv1beta1_test.go b/pkg/offering/k8s/console/override/ingressv1beta1_test.go new file mode 100644 index 00000000..847038fb --- /dev/null +++ b/pkg/offering/k8s/console/override/ingressv1beta1_test.go @@ -0,0 +1,88 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + networkingv1beta1 "k8s.io/api/networking/v1beta1" + "k8s.io/apimachinery/pkg/util/intstr" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/k8s/console/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" +) + +var _ = Describe("K8s Console Ingress Overrides", func() { + var ( + err error + overrider *override.Override + instance *current.IBPConsole + ingress *networkingv1beta1.Ingress + consolehost string + ) + + BeforeEach(func() { + overrider = &override.Override{} + instance = ¤t.IBPConsole{ + Spec: current.IBPConsoleSpec{ + NetworkInfo: ¤t.NetworkInfo{ + Domain: "test.domain", + }, + }, + } + ingress, err = util.GetIngressv1beta1FromFile("../../../../../definitions/console/ingressv1beta1.yaml") + Expect(err).NotTo(HaveOccurred()) + + consolehost = instance.Namespace + "-" + instance.Name + "-console" + "." + instance.Spec.NetworkInfo.Domain + }) + + Context("create", func() { + It("appropriately overrides the respective values for ingress", func() { + err := overrider.Ingressv1beta1(instance, ingress, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + By("setting rule", func() { + Expect(ingress.Spec.Rules).To(HaveLen(1)) + Expect(ingress.Spec.Rules[0]).To(Equal(networkingv1beta1.IngressRule{ + Host: consolehost, + IngressRuleValue: networkingv1beta1.IngressRuleValue{ + HTTP: &networkingv1beta1.HTTPIngressRuleValue{ + Paths: []networkingv1beta1.HTTPIngressPath{ + networkingv1beta1.HTTPIngressPath{ + Backend: networkingv1beta1.IngressBackend{ + ServiceName: instance.GetName(), + ServicePort: intstr.FromString("optools"), + }, + Path: "/", + }, + }, + }, + }, + })) + }) + + By("setting TLS hosts", func() { + Expect(ingress.Spec.TLS).To(HaveLen(1)) + Expect(ingress.Spec.TLS[0].Hosts).To(Equal([]string{consolehost})) + }) + }) + }) +}) diff --git a/pkg/offering/k8s/console/override/override.go b/pkg/offering/k8s/console/override/override.go new file mode 100644 index 00000000..039fed76 --- /dev/null +++ b/pkg/offering/k8s/console/override/override.go @@ -0,0 +1,27 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package override + +import ( + baseconsole "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/console/override" +) + +type Override struct { + baseconsole.Override +} diff --git a/pkg/offering/k8s/console/override/override_suite_test.go b/pkg/offering/k8s/console/override/override_suite_test.go new file mode 100644 index 00000000..fa47c9b8 --- /dev/null +++ b/pkg/offering/k8s/console/override/override_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestOverride(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Override Suite") +} diff --git a/pkg/offering/k8s/console/override/override_test.go b/pkg/offering/k8s/console/override/override_test.go new file mode 100644 index 00000000..06476a22 --- /dev/null +++ b/pkg/offering/k8s/console/override/override_test.go @@ -0,0 +1,131 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + networkingv1 "k8s.io/api/networking/v1" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/k8s/console/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var _ = Describe("K8S Console Overrides", func() { + var ( + overrider *override.Override + instance *current.IBPConsole + ) + + BeforeEach(func() { + overrider = &override.Override{} + }) + + Context("Ingress", func() { + var ( + ingress *networkingv1.Ingress + ) + + BeforeEach(func() { + var err error + + ingress, err = util.GetIngressFromFile("../../../../../definitions/console/ingress.yaml") + Expect(err).NotTo(HaveOccurred()) + + instance = ¤t.IBPConsole{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ingress1", + Namespace: "namespace1", + }, + Spec: current.IBPConsoleSpec{ + NetworkInfo: ¤t.NetworkInfo{ + Domain: "domain1", + }, + }, + } + }) + + When("creating ingress", func() { + It("sets appropriate values", func() { + err := overrider.Ingress(instance, ingress, resources.Create) + Expect(err).NotTo(HaveOccurred()) + VerifyIngressCommonOverrides(instance, ingress) + }) + }) + + When("creating ingress with custom class", func() { + It("sets appropriate values", func() { + instance.Spec.Ingress = current.Ingress{ + Class: "custom", + } + err := overrider.Ingress(instance, ingress, resources.Create) + Expect(err).NotTo(HaveOccurred()) + VerifyIngressCommonOverrides(instance, ingress) + }) + }) + + When("updating ingress", func() { + It("sets appropriate values", func() { + err := overrider.Ingress(instance, ingress, resources.Update) + Expect(err).NotTo(HaveOccurred()) + VerifyIngressCommonOverrides(instance, ingress) + }) + }) + + When("updating ingress with custom class", func() { + It("sets appropriate values", func() { + instance.Spec.Ingress = current.Ingress{ + Class: "custom", + } + err := overrider.Ingress(instance, ingress, resources.Update) + Expect(err).NotTo(HaveOccurred()) + VerifyIngressCommonOverrides(instance, ingress) + }) + }) + }) +}) + +func VerifyIngressCommonOverrides(instance *current.IBPConsole, ingress *networkingv1.Ingress) { + By("setting annotation for custom ingress class", func() { + if instance.Spec.Ingress.Class != "" { + Expect(ingress.ObjectMeta.Annotations["kubernetes.io/ingress.class"]).To(Equal(instance.Spec.Ingress.Class)) + } else { + Expect(ingress.ObjectMeta.Annotations["kubernetes.io/ingress.class"]).To(Equal("nginx")) + } + }) + + By("setting api host in rules host", func() { + Expect(ingress.Spec.Rules[0].Host).To(Equal(instance.Namespace + "-" + instance.Name + "-console" + "." + instance.Spec.NetworkInfo.Domain)) + }) + + By("setting api tls host", func() { + Expect(ingress.Spec.TLS[0].Hosts).To(Equal([]string{instance.Namespace + "-" + instance.Name + "-console" + "." 
+ instance.Spec.NetworkInfo.Domain})) + }) + + By("setting backend service name", func() { + Expect(ingress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name).To(Equal(instance.Name)) + }) + + By("setting backend service port", func() { + Expect(ingress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Port.Name).To(Equal("optools")) + }) +} diff --git a/pkg/offering/k8s/orderer/node.go b/pkg/offering/k8s/orderer/node.go new file mode 100644 index 00000000..c4d6826a --- /dev/null +++ b/pkg/offering/k8s/orderer/node.go @@ -0,0 +1,266 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package k8sorderer + +import ( + "context" + "fmt" + "strings" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + commoninit "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common" + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + resourcemanager "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/manager" + baseorderer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/k8s/orderer/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/operatorerrors" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + "github.com/IBM-Blockchain/fabric-operator/version" + "github.com/pkg/errors" + networkingv1 "k8s.io/api/networking/v1" + networkingv1beta1 "k8s.io/api/networking/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +type Override interface { + baseorderer.Override + Ingress(v1.Object, *networkingv1.Ingress, resources.Action) error + Ingressv1beta1(v1.Object, *networkingv1beta1.Ingress, resources.Action) error +} + +var _ baseorderer.IBPOrderer = &Node{} + +type Node struct { + *baseorderer.Node + + IngressManager resources.Manager + Ingressv1beta1Manager resources.Manager + + Override Override +} + +func NewNode(basenode *baseorderer.Node) *Node { + node := &Node{ + Node: basenode, + Override: &override.Override{}, + } + node.CreateManagers() + return node +} + +func (n *Node) CreateManagers() { + override := n.Override + resourceManager := resourcemanager.New(n.Client, n.Scheme) + n.IngressManager = resourceManager.CreateIngressManager("", override.Ingress, n.GetLabels, n.Config.OrdererInitConfig.IngressFile) + n.Ingressv1beta1Manager = resourceManager.CreateIngressv1beta1Manager("", override.Ingressv1beta1, n.GetLabels, n.Config.OrdererInitConfig.Ingressv1beta1File) +} + +func (n *Node) Reconcile(instance *current.IBPOrderer, update baseorderer.Update) (common.Result, error) { + var err error + var status *current.CRStatus + + log.Info(fmt.Sprintf("Reconciling node instance '%s' ... 
update: %+v", instance.Name, update)) + + versionSet, err := n.SetVersion(instance) + if err != nil { + return common.Result{}, errors.Wrap(err, fmt.Sprintf("failed updating CR '%s' to version '%s'", instance.Name, version.Operator)) + } + if versionSet { + log.Info("Instance version updated, requeuing request...") + return common.Result{ + Result: reconcile.Result{ + Requeue: true, + }, + OverrideUpdateStatus: true, + }, nil + } + + instanceUpdated, err := n.PreReconcileChecks(instance, update) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed pre reconcile checks") + } + externalEndpointUpdated := n.UpdateExternalEndpoint(instance) + + if instanceUpdated || externalEndpointUpdated { + log.Info(fmt.Sprintf("Updating instance after pre reconcile checks: %t, updating external endpoint: %t", + instanceUpdated, externalEndpointUpdated)) + + err = n.Client.Patch(context.TODO(), instance, nil, k8sclient.PatchOption{ + Resilient: &k8sclient.ResilientPatch{ + Retry: 3, + Into: ¤t.IBPOrderer{}, + Strategy: client.MergeFrom, + }, + }) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to update instance") + } + + log.Info("Instance updated during reconcile checks, request will be requeued...") + return common.Result{ + Result: reconcile.Result{ + Requeue: true, + }, + Status: ¤t.CRStatus{ + Type: current.Initializing, + Reason: "Setting default values for either zone, region, and/or external endpoint", + Message: "Operator has updated spec with defaults as part of initialization", + }, + OverrideUpdateStatus: true, + }, nil + } + + err = n.Initialize(instance, update) + if err != nil { + return common.Result{}, operatorerrors.Wrap(err, operatorerrors.OrdererInitilizationFailed, "failed to initialize orderer node") + } + + if update.OrdererTagUpdated() { + if err := n.ReconcileFabricOrdererMigration(instance); err != nil { + return common.Result{}, operatorerrors.Wrap(err, operatorerrors.FabricOrdererMigrationFailed, "failed to migrate fabric orderer versions") + } + } + + if update.MigrateToV2() { + if err := n.FabricOrdererMigrationV2_0(instance); err != nil { + return common.Result{}, operatorerrors.Wrap(err, operatorerrors.FabricOrdererMigrationFailed, "failed to migrate fabric orderer to version v2.x") + } + } + + if update.MigrateToV24() { + if err := n.FabricOrdererMigrationV2_4(instance); err != nil { + return common.Result{}, operatorerrors.Wrap(err, operatorerrors.FabricOrdererMigrationFailed, "failed to migrate fabric orderer to version v2.4.x") + } + } + + err = n.ReconcileManagers(instance, update, nil) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to reconcile managers") + } + + err = n.UpdateConnectionProfile(instance) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to create connection profile") + } + + err = n.CheckStates(instance) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to check and restore state") + } + + err = n.UpdateParentStatus(instance) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to update parent's status") + } + + status, result, err := n.CustomLogic(instance, update) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to run custom offering logic ") + } + if result != nil { + log.Info(fmt.Sprintf("Finished reconciling '%s' with Custom Logic result", instance.GetName())) + return *result, nil + } + + if update.EcertUpdated() { + log.Info("Ecert was updated") + // Request deployment restart for tls cert update + err = 
n.Restart.ForCertUpdate(commoninit.ECERT, instance) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to update restart config") + } + } + + if update.TLSCertUpdated() { + log.Info("TLS cert was updated") + // Request deployment restart for ecert update + err = n.Restart.ForCertUpdate(commoninit.TLS, instance) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to update restart config") + } + } + + if update.MSPUpdated() { + err = n.UpdateMSPCertificates(instance) + if err != nil { + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to update certificates passed in MSP spec") + } + } + } + + if err := n.HandleActions(instance, update); err != nil { + return common.Result{}, err + } + + if err := n.HandleRestart(instance, update); err != nil { + return common.Result{}, err + } + + return common.Result{ + Status: status, + }, nil +} + +func (n *Node) ReconcileManagers(instance *current.IBPOrderer, updated baseorderer.Update, genesisBlock []byte) error { + err := n.Node.ReconcileManagers(instance, updated, genesisBlock) + if err != nil { + return err + } + + update := updated.SpecUpdated() + + err = n.ReconcileIngressManager(instance, update) + if err != nil { + return errors.Wrap(err, "failed Ingress reconciliation") + } + + return nil +} + +func (n *Node) ReconcileIngressManager(instance *current.IBPOrderer, update bool) error { + if n.Config.Operator.Globals.AllowKubernetesEighteen == "true" { + // check k8s version + version, err := util.GetServerVersion() + if err != nil { + return err + } + if strings.Compare(version.Minor, "19") < 0 { // v1beta + err = n.Ingressv1beta1Manager.Reconcile(instance, update) + if err != nil { + return errors.Wrap(err, "failed Ingressv1beta1 reconciliation") + } + } else { + err = n.IngressManager.Reconcile(instance, update) + if err != nil { + return errors.Wrap(err, "failed Ingress reconciliation") + } + } + } else { + err := n.IngressManager.Reconcile(instance, update) + if err != nil { + return errors.Wrap(err, "failed Ingress reconciliation") + } + } + return nil +} diff --git a/pkg/offering/k8s/orderer/orderer.go b/pkg/offering/k8s/orderer/orderer.go new file mode 100644 index 00000000..951f10cd --- /dev/null +++ b/pkg/offering/k8s/orderer/orderer.go @@ -0,0 +1,173 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package k8sorderer + +import ( + "fmt" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + baseorderer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer" + baseordereroverride "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/k8s/orderer/override" + "github.com/IBM-Blockchain/fabric-operator/version" + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var log = logf.Log.WithName("k8s_orderer") + +const ( + defaultOrdererNode = "./definitions/orderer/orderernode.yaml" +) + +type Orderer struct { + *baseorderer.Orderer +} + +func New(client k8sclient.Client, scheme *runtime.Scheme, config *config.Config) *Orderer { + o := &override.Override{ + Override: baseordereroverride.Override{ + Client: client, + Config: config, + }, + } + + orderer := &Orderer{ + Orderer: baseorderer.New(client, scheme, config, o), + } + + return orderer +} + +func (o *Orderer) Reconcile(instance *current.IBPOrderer, update baseorderer.Update) (common.Result, error) { + var err error + + if instance.Spec.NodeNumber == nil { + log.Info(fmt.Sprintf("Reconciling cluster instance '%s' ... update: %+v", instance.Name, update)) + + versionSet, err := o.SetVersion(instance) + if err != nil { + return common.Result{}, errors.Wrap(err, fmt.Sprintf("failed updating CR '%s' to version '%s'", instance.Name, version.Operator)) + } + if versionSet { + log.Info("Instance version updated, requeuing request...") + return common.Result{ + Result: reconcile.Result{ + Requeue: true, + }, + }, nil + } + + if instance.Status.Status == "" || instance.Status.Status == current.False || (instance.Status.Version != "" && version.String(instance.Status.Version).GreaterThan(version.V210)) { + instanceUpdated, err := o.PreReconcileChecks(instance, update) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed pre reconcile checks") + } + + if instanceUpdated { + log.Info("Instance updated, requeuing request...") + return common.Result{ + Result: reconcile.Result{ + Requeue: true, + }, + OverrideUpdateStatus: true, + }, nil + } + } + } + + // TODO: Major rehaul is needed of versioning and migration strategy. 
Need a way to + // migrate as first step to get CR spec in appropriate state to avoid versioning checks + // like below and above + if (instance.Status.Version == "" && instance.Status.Status == current.True) || (instance.Status.Version != "" && version.String(instance.Status.Version).Equal(version.V210)) { + if instance.Spec.NodeNumber == nil { + number := 1 + instance.Spec.NodeNumber = &number + } + } + + var result common.Result + if instance.Spec.NodeNumber == nil { + result, err := o.ReconcileCluster(instance, update, o.AddHostPortToProfile) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to reconcile cluster") + } + + return result, nil + } + + result, err = o.ReconcileNode(instance, update) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to reconcile node") + } + + return result, nil +} + +func (o *Orderer) ReconcileNode(instance *current.IBPOrderer, update baseorderer.Update) (common.Result, error) { + var err error + + hostAPI := fmt.Sprintf("%s-%s-orderer.%s", instance.Namespace, instance.Name, instance.Spec.Domain) + hostOperations := fmt.Sprintf("%s-%s-operations.%s", instance.Namespace, instance.Name, instance.Spec.Domain) + hostGrpc := fmt.Sprintf("%s-%s-grpcweb.%s", instance.Namespace, instance.Name, instance.Spec.Domain) + hosts := []string{} + currentVer := version.String(instance.Spec.FabricVersion) + if currentVer.EqualWithoutTag(version.V2_4_1) || currentVer.GreaterThan(version.V2_4_1) { + hostAdmin := fmt.Sprintf("%s-%s-admin.%s", instance.Namespace, instance.Name, instance.Spec.Domain) + hosts = append(hosts, hostAPI, hostOperations, hostGrpc, hostAdmin, "127.0.0.1") + //TODO: need to Re-enroll when orderer migrated from 1.4.x/2.2.x to 2.4.1 + } else { + hosts = append(hosts, hostAPI, hostOperations, hostGrpc, "127.0.0.1") + } + + o.CheckCSRHosts(instance, hosts) + + k8snode := NewNode(baseorderer.NewNode(o.Client, o.Scheme, o.Config, instance.GetName(), o.RenewCertTimers, o.RestartManager)) + + log.Info(fmt.Sprintf("Reconciling Orderer node %s", instance.GetName())) + if !instance.Spec.IsUsingChannelLess() && instance.Spec.GenesisBlock == "" && !(instance.Spec.IsPrecreateOrderer()) { + return common.Result{}, fmt.Errorf("Genesis block not provided for orderer node: %s", instance.GetName()) + } + + result, err := k8snode.Reconcile(instance, update) + if err != nil { + return result, err + } + return result, nil +} + +func (o *Orderer) GetNodes(instance *current.IBPOrderer) []*Node { + size := instance.Spec.ClusterSize + nodes := []*Node{} + for i := 1; i <= size; i++ { + node := o.GetNode(i) + nodes = append(nodes, node) + } + return nodes +} + +func (o *Orderer) GetNode(nodeNumber int) *Node { + basenode := o.NodeManager.GetNode(nodeNumber, o.RenewCertTimers, o.RestartManager) + return NewNode(basenode) +} diff --git a/pkg/offering/k8s/orderer/orderer_suite_test.go b/pkg/offering/k8s/orderer/orderer_suite_test.go new file mode 100644 index 00000000..99e4d4f3 --- /dev/null +++ b/pkg/offering/k8s/orderer/orderer_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package k8sorderer_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestOrderer(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Orderer Suite") +} diff --git a/pkg/offering/k8s/orderer/orderer_test.go b/pkg/offering/k8s/orderer/orderer_test.go new file mode 100644 index 00000000..91311c30 --- /dev/null +++ b/pkg/offering/k8s/orderer/orderer_test.go @@ -0,0 +1,86 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package k8sorderer_test + +import ( + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + cmocks "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + ordererinit "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer" + baseorderer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer/mocks" + k8sorderer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/k8s/orderer" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/runtime" +) + +var _ = Describe("K8S Orderer", func() { + var ( + orderer *k8sorderer.Orderer + instance *current.IBPOrderer + mockKubeClient *cmocks.Client + cfg *config.Config + update *mocks.Update + ) + + BeforeEach(func() { + mockKubeClient = &cmocks.Client{} + update = &mocks.Update{} + instance = ¤t.IBPOrderer{ + Spec: current.IBPOrdererSpec{ + License: current.License{ + Accept: true, + }, + OrdererType: "etcdraft", + SystemChannelName: "testchainid", + OrgName: "orderermsp", + MSPID: "orderermsp", + ImagePullSecrets: []string{"regcred"}, + ClusterSecret: []*current.SecretSpec{}, + Secret: ¤t.SecretSpec{}, + GenesisBlock: "GenesisBlock", + Images: ¤t.OrdererImages{}, + }, + } + instance.Kind = "IBPOrderer" + + cfg = &config.Config{ + OrdererInitConfig: &ordererinit.Config{ + ConfigTxFile: "../../../../defaultconfig/orderer/configtx.yaml", + OUFile: "../../../../defaultconfig/orderer/ouconfig.yaml", + }, + } + + orderer = &k8sorderer.Orderer{ + Orderer: &baseorderer.Orderer{ + Client: mockKubeClient, + Scheme: &runtime.Scheme{}, + Config: cfg, + }, + } + }) + + Context("Reconciles", func() { + It("reconciles IBPOrderer", func() { + _, err := orderer.Reconcile(instance, update) + Expect(err).NotTo(HaveOccurred()) + }) + }) +}) diff --git a/pkg/offering/k8s/orderer/override/ingress.go b/pkg/offering/k8s/orderer/override/ingress.go new file mode 100644 index 00000000..a790c249 --- /dev/null +++ b/pkg/offering/k8s/orderer/override/ingress.go @@ -0,0 +1,177 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override + +import ( + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/version" + networkingv1 "k8s.io/api/networking/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (o *Override) Ingress(object v1.Object, ingress *networkingv1.Ingress, action resources.Action) error { + instance := object.(*current.IBPOrderer) + + switch action { + case resources.Create: + return o.CreateIngress(instance, ingress) + case resources.Update: + return o.UpdateIngress(instance, ingress) + } + + return nil +} + +func (o *Override) CreateIngress(instance *current.IBPOrderer, ingress *networkingv1.Ingress) error { + return o.CommonIngress(instance, ingress) +} + +func (o *Override) UpdateIngress(instance *current.IBPOrderer, ingress *networkingv1.Ingress) error { + return o.CommonIngress(instance, ingress) +} + +func (o *Override) CommonIngress(instance *current.IBPOrderer, ingress *networkingv1.Ingress) error { + + ingressClass := "nginx" + if instance.Spec.Ingress.Class != "" { + ingressClass = instance.Spec.Ingress.Class + } + ingress.ObjectMeta.Annotations["kubernetes.io/ingress.class"] = ingressClass + + apihost := instance.Namespace + "-" + instance.Name + "-orderer" + "." 
+ instance.Spec.Domain + operationshost := instance.Namespace + "-" + instance.Name + "-operations" + "." + instance.Spec.Domain + grpcwebhost := instance.Namespace + "-" + instance.Name + "-grpcweb" + "." + instance.Spec.Domain + + pathType := networkingv1.PathTypeImplementationSpecific + ingress.Spec = networkingv1.IngressSpec{ + Rules: []networkingv1.IngressRule{ + networkingv1.IngressRule{ + Host: apihost, + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ + networkingv1.HTTPIngressPath{ + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: instance.GetName(), + Port: networkingv1.ServiceBackendPort{ + Name: "orderer-grpc", + }, + }, + }, + Path: "/", + PathType: &pathType, + }, + }, + }, + }, + }, + networkingv1.IngressRule{ + Host: operationshost, + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ + networkingv1.HTTPIngressPath{ + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: instance.GetName(), + Port: networkingv1.ServiceBackendPort{ + Name: "operations", + }, + }, + }, + Path: "/", + PathType: &pathType, + }, + }, + }, + }, + }, + networkingv1.IngressRule{ + Host: grpcwebhost, + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ + networkingv1.HTTPIngressPath{ + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: instance.GetName(), + Port: networkingv1.ServiceBackendPort{ + Name: "grpcweb", + }, + }, + }, + Path: "/", + PathType: &pathType, + }, + }, + }, + }, + }, + }, + TLS: []networkingv1.IngressTLS{ + networkingv1.IngressTLS{ + Hosts: []string{apihost}, + }, + networkingv1.IngressTLS{ + Hosts: []string{operationshost}, + }, + networkingv1.IngressTLS{ + Hosts: []string{grpcwebhost}, + }, + }, + } + currentVer := version.String(instance.Spec.FabricVersion) + if currentVer.EqualWithoutTag(version.V2_4_1) || currentVer.GreaterThan(version.V2_4_1) { + adminhost := instance.Namespace + "-" + instance.Name + "-admin" + "." + instance.Spec.Domain + adminIngressRule := []networkingv1.IngressRule{ + networkingv1.IngressRule{ + Host: adminhost, + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ + networkingv1.HTTPIngressPath{ + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: instance.GetName(), + Port: networkingv1.ServiceBackendPort{ + Name: "orderer-admin", + }, + }, + }, + Path: "/", + PathType: &pathType, + }, + }, + }, + }, + }, + } + + admintls := []networkingv1.IngressTLS{ + networkingv1.IngressTLS{ + Hosts: []string{adminhost}, + }, + } + ingress.Spec.Rules = append(ingress.Spec.Rules, adminIngressRule...) + ingress.Spec.TLS = append(ingress.Spec.TLS, admintls...) 
+ } + return nil +} diff --git a/pkg/offering/k8s/orderer/override/ingress_test.go b/pkg/offering/k8s/orderer/override/ingress_test.go new file mode 100644 index 00000000..8bd72bd2 --- /dev/null +++ b/pkg/offering/k8s/orderer/override/ingress_test.go @@ -0,0 +1,140 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + networkingv1 "k8s.io/api/networking/v1" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/k8s/orderer/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" +) + +var _ = Describe("K8s Orderer Ingress Overrides", func() { + var ( + err error + overrider *override.Override + instance *current.IBPOrderer + ingress *networkingv1.Ingress + apihost string + operationshost string + grpcwebhost string + ) + + BeforeEach(func() { + overrider = &override.Override{} + instance = ¤t.IBPOrderer{ + Spec: current.IBPOrdererSpec{ + Domain: "test.domain", + }, + } + ingress, err = util.GetIngressFromFile("../../../../../definitions/orderer/ingress.yaml") + Expect(err).NotTo(HaveOccurred()) + + apihost = instance.Namespace + "-" + instance.Name + "-orderer" + "." + instance.Spec.Domain + operationshost = instance.Namespace + "-" + instance.Name + "-operations" + "." + instance.Spec.Domain + grpcwebhost = instance.Namespace + "-" + instance.Name + "-grpcweb" + "." 
+ instance.Spec.Domain + }) + + Context("create", func() { + It("appropriately overrides the respective values for ingress", func() { + err := overrider.Ingress(instance, ingress, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + By("setting rules", func() { + pathType := networkingv1.PathTypeImplementationSpecific + Expect(ingress.Spec.Rules).To(HaveLen(3)) + Expect(ingress.Spec.Rules[0]).To(Equal(networkingv1.IngressRule{ + Host: apihost, + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ + networkingv1.HTTPIngressPath{ + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: instance.GetName(), + Port: networkingv1.ServiceBackendPort{ + Name: "orderer-grpc", + }, + }, + }, + Path: "/", + PathType: &pathType, + }, + }, + }, + }, + })) + Expect(ingress.Spec.Rules[1]).To(Equal(networkingv1.IngressRule{ + Host: operationshost, + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ + networkingv1.HTTPIngressPath{ + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: instance.GetName(), + Port: networkingv1.ServiceBackendPort{ + Name: "operations", + }, + }, + }, + Path: "/", + PathType: &pathType, + }, + }, + }, + }, + })) + Expect(ingress.Spec.Rules[2]).To(Equal(networkingv1.IngressRule{ + Host: grpcwebhost, + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ + networkingv1.HTTPIngressPath{ + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: instance.GetName(), + Port: networkingv1.ServiceBackendPort{ + Name: "grpcweb", + }, + }, + }, + Path: "/", + PathType: &pathType, + }, + }, + }, + }, + })) + }) + + By("setting TLS hosts", func() { + Expect(ingress.Spec.TLS).To(HaveLen(3)) + Expect(ingress.Spec.TLS[0].Hosts).To(Equal([]string{apihost})) + Expect(ingress.Spec.TLS[1].Hosts).To(Equal([]string{operationshost})) + Expect(ingress.Spec.TLS[2].Hosts).To(Equal([]string{grpcwebhost})) + }) + + }) + }) +}) diff --git a/pkg/offering/k8s/orderer/override/ingressv1beta1.go b/pkg/offering/k8s/orderer/override/ingressv1beta1.go new file mode 100644 index 00000000..fdff959a --- /dev/null +++ b/pkg/offering/k8s/orderer/override/ingressv1beta1.go @@ -0,0 +1,127 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package override + +import ( + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + networkingv1beta1 "k8s.io/api/networking/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func (o *Override) Ingressv1beta1(object v1.Object, ingress *networkingv1beta1.Ingress, action resources.Action) error { + instance := object.(*current.IBPOrderer) + + switch action { + case resources.Create: + return o.CreateIngressv1beta1(instance, ingress) + case resources.Update: + return o.UpdateIngressv1beta1(instance, ingress) + } + + return nil +} + +func (o *Override) CreateIngressv1beta1(instance *current.IBPOrderer, ingress *networkingv1beta1.Ingress) error { + return o.CommonIngressv1beta1(instance, ingress) +} + +func (o *Override) UpdateIngressv1beta1(instance *current.IBPOrderer, ingress *networkingv1beta1.Ingress) error { + return o.CommonIngressv1beta1(instance, ingress) +} + +func (o *Override) CommonIngressv1beta1(instance *current.IBPOrderer, ingress *networkingv1beta1.Ingress) error { + + ingressClass := "nginx" + if instance.Spec.Ingress.Class != "" { + ingressClass = instance.Spec.Ingress.Class + } + ingress.ObjectMeta.Annotations["kubernetes.io/ingress.class"] = ingressClass + + apihost := instance.Namespace + "-" + instance.Name + "-orderer" + "." + instance.Spec.Domain + operationshost := instance.Namespace + "-" + instance.Name + "-operations" + "." + instance.Spec.Domain + grpcwebhost := instance.Namespace + "-" + instance.Name + "-grpcweb" + "." + instance.Spec.Domain + + ingress.Spec = networkingv1beta1.IngressSpec{ + Rules: []networkingv1beta1.IngressRule{ + networkingv1beta1.IngressRule{ + Host: apihost, + IngressRuleValue: networkingv1beta1.IngressRuleValue{ + HTTP: &networkingv1beta1.HTTPIngressRuleValue{ + Paths: []networkingv1beta1.HTTPIngressPath{ + networkingv1beta1.HTTPIngressPath{ + Backend: networkingv1beta1.IngressBackend{ + ServiceName: instance.GetName(), + ServicePort: intstr.FromString("orderer-grpc"), + }, + Path: "/", + }, + }, + }, + }, + }, + networkingv1beta1.IngressRule{ + Host: operationshost, + IngressRuleValue: networkingv1beta1.IngressRuleValue{ + HTTP: &networkingv1beta1.HTTPIngressRuleValue{ + Paths: []networkingv1beta1.HTTPIngressPath{ + networkingv1beta1.HTTPIngressPath{ + Backend: networkingv1beta1.IngressBackend{ + ServiceName: instance.GetName(), + ServicePort: intstr.FromString("operations"), + }, + Path: "/", + }, + }, + }, + }, + }, + networkingv1beta1.IngressRule{ + Host: grpcwebhost, + IngressRuleValue: networkingv1beta1.IngressRuleValue{ + HTTP: &networkingv1beta1.HTTPIngressRuleValue{ + Paths: []networkingv1beta1.HTTPIngressPath{ + networkingv1beta1.HTTPIngressPath{ + Backend: networkingv1beta1.IngressBackend{ + ServiceName: instance.GetName(), + ServicePort: intstr.FromString("grpcweb"), + }, + Path: "/", + }, + }, + }, + }, + }, + }, + TLS: []networkingv1beta1.IngressTLS{ + networkingv1beta1.IngressTLS{ + Hosts: []string{apihost}, + }, + networkingv1beta1.IngressTLS{ + Hosts: []string{operationshost}, + }, + networkingv1beta1.IngressTLS{ + Hosts: []string{grpcwebhost}, + }, + }, + } + + return nil +} diff --git a/pkg/offering/k8s/orderer/override/ingressv1beta1_test.go b/pkg/offering/k8s/orderer/override/ingressv1beta1_test.go new file mode 100644 index 00000000..2fefb0d5 --- /dev/null +++ b/pkg/offering/k8s/orderer/override/ingressv1beta1_test.go @@ -0,0 +1,125 @@ +/* + * Copyright contributors to the 
Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + networkingv1beta1 "k8s.io/api/networking/v1beta1" + "k8s.io/apimachinery/pkg/util/intstr" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/k8s/orderer/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" +) + +var _ = Describe("K8s Orderer Ingress Overrides", func() { + var ( + err error + overrider *override.Override + instance *current.IBPOrderer + ingress *networkingv1beta1.Ingress + apihost string + operationshost string + grpcwebhost string + ) + + BeforeEach(func() { + overrider = &override.Override{} + instance = ¤t.IBPOrderer{ + Spec: current.IBPOrdererSpec{ + Domain: "test.domain", + }, + } + ingress, err = util.GetIngressv1beta1FromFile("../../../../../definitions/orderer/ingressv1beta1.yaml") + Expect(err).NotTo(HaveOccurred()) + + apihost = instance.Namespace + "-" + instance.Name + "-orderer" + "." + instance.Spec.Domain + operationshost = instance.Namespace + "-" + instance.Name + "-operations" + "." + instance.Spec.Domain + grpcwebhost = instance.Namespace + "-" + instance.Name + "-grpcweb" + "." 
+ instance.Spec.Domain + }) + + Context("create", func() { + It("appropriately overrides the respective values for ingress", func() { + err := overrider.Ingressv1beta1(instance, ingress, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + By("setting rules", func() { + Expect(ingress.Spec.Rules).To(HaveLen(3)) + Expect(ingress.Spec.Rules[0]).To(Equal(networkingv1beta1.IngressRule{ + Host: apihost, + IngressRuleValue: networkingv1beta1.IngressRuleValue{ + HTTP: &networkingv1beta1.HTTPIngressRuleValue{ + Paths: []networkingv1beta1.HTTPIngressPath{ + networkingv1beta1.HTTPIngressPath{ + Backend: networkingv1beta1.IngressBackend{ + ServiceName: instance.GetName(), + ServicePort: intstr.FromString("orderer-grpc"), + }, + Path: "/", + }, + }, + }, + }, + })) + Expect(ingress.Spec.Rules[1]).To(Equal(networkingv1beta1.IngressRule{ + Host: operationshost, + IngressRuleValue: networkingv1beta1.IngressRuleValue{ + HTTP: &networkingv1beta1.HTTPIngressRuleValue{ + Paths: []networkingv1beta1.HTTPIngressPath{ + networkingv1beta1.HTTPIngressPath{ + Backend: networkingv1beta1.IngressBackend{ + ServiceName: instance.GetName(), + ServicePort: intstr.FromString("operations"), + }, + Path: "/", + }, + }, + }, + }, + })) + Expect(ingress.Spec.Rules[2]).To(Equal(networkingv1beta1.IngressRule{ + Host: grpcwebhost, + IngressRuleValue: networkingv1beta1.IngressRuleValue{ + HTTP: &networkingv1beta1.HTTPIngressRuleValue{ + Paths: []networkingv1beta1.HTTPIngressPath{ + networkingv1beta1.HTTPIngressPath{ + Backend: networkingv1beta1.IngressBackend{ + ServiceName: instance.GetName(), + ServicePort: intstr.FromString("grpcweb"), + }, + Path: "/", + }, + }, + }, + }, + })) + }) + + By("setting TLS hosts", func() { + Expect(ingress.Spec.TLS).To(HaveLen(3)) + Expect(ingress.Spec.TLS[0].Hosts).To(Equal([]string{apihost})) + Expect(ingress.Spec.TLS[1].Hosts).To(Equal([]string{operationshost})) + Expect(ingress.Spec.TLS[2].Hosts).To(Equal([]string{grpcwebhost})) + }) + + }) + }) +}) diff --git a/pkg/offering/k8s/orderer/override/override.go b/pkg/offering/k8s/orderer/override/override.go new file mode 100644 index 00000000..f7e2d1ee --- /dev/null +++ b/pkg/offering/k8s/orderer/override/override.go @@ -0,0 +1,27 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package override + +import ( + baseorderer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer/override" +) + +type Override struct { + baseorderer.Override +} diff --git a/pkg/offering/k8s/orderer/override/override_suite_test.go b/pkg/offering/k8s/orderer/override/override_suite_test.go new file mode 100644 index 00000000..fa47c9b8 --- /dev/null +++ b/pkg/offering/k8s/orderer/override/override_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestOverride(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Override Suite") +} diff --git a/pkg/offering/k8s/orderer/override/override_test.go b/pkg/offering/k8s/orderer/override/override_test.go new file mode 100644 index 00000000..176afc85 --- /dev/null +++ b/pkg/offering/k8s/orderer/override/override_test.go @@ -0,0 +1,161 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + networkingv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/k8s/orderer/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" +) + +var _ = Describe("K8s Orderer Overrides", func() { + var ( + overrider *override.Override + instance *current.IBPOrderer + ) + + BeforeEach(func() { + overrider = &override.Override{} + }) + + Context("Ingress", func() { + var ( + ingress *networkingv1.Ingress + ) + + BeforeEach(func() { + var err error + + ingress, err = util.GetIngressFromFile("../../../../../definitions/orderer/ingress.yaml") + Expect(err).NotTo(HaveOccurred()) + + instance = ¤t.IBPOrderer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ingress1", + Namespace: "namespace1", + }, + Spec: current.IBPOrdererSpec{ + Domain: "domain1", + }, + } + }) + + When("creating ingress", func() { + It("sets appropriate values", func() { + err := overrider.Ingress(instance, ingress, resources.Create) + Expect(err).NotTo(HaveOccurred()) + VerifyIngressCommonOverrides(instance, ingress) + }) + }) + + When("creating ingress with custom class", func() { + It("sets appropriate values", func() { + instance.Spec.Ingress = current.Ingress{ + Class: "custom", + } + err := overrider.Ingress(instance, ingress, resources.Create) + Expect(err).NotTo(HaveOccurred()) + VerifyIngressCommonOverrides(instance, ingress) + }) + }) + + When("updating ingress", func() { + It("sets appropriate values", func() { + err := overrider.Ingress(instance, ingress, resources.Update) + Expect(err).NotTo(HaveOccurred()) + VerifyIngressCommonOverrides(instance, ingress) + }) + }) + + When("updating ingress with custom class", func() { + It("sets appropriate values", func() { + instance.Spec.Ingress = current.Ingress{ + Class: "custom", + } + err := overrider.Ingress(instance, ingress, resources.Update) + Expect(err).NotTo(HaveOccurred()) + VerifyIngressCommonOverrides(instance, ingress) + }) + }) + }) +}) + +func VerifyIngressCommonOverrides(instance *current.IBPOrderer, ingress *networkingv1.Ingress) { + By("setting annotation for custom ingress class", func() { + if instance.Spec.Ingress.Class != "" { + Expect(ingress.ObjectMeta.Annotations["kubernetes.io/ingress.class"]).To(Equal(instance.Spec.Ingress.Class)) + } else { + Expect(ingress.ObjectMeta.Annotations["kubernetes.io/ingress.class"]).To(Equal("nginx")) + } + }) + + By("setting api host in rules host", func() { + Expect(ingress.Spec.Rules[0].Host).To(Equal(instance.Namespace + "-" + instance.Name + "-orderer" + "." + instance.Spec.Domain)) + }) + + By("setting api tls host", func() { + Expect(ingress.Spec.TLS[0].Hosts).To(Equal([]string{instance.Namespace + "-" + instance.Name + "-orderer" + "." + instance.Spec.Domain})) + }) + + By("setting backend service name", func() { + Expect(ingress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name).To(Equal(instance.Name)) + }) + + By("setting backend service port", func() { + Expect(ingress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Port.Name).To(Equal("orderer-grpc")) + }) + + By("setting operations host in rules host", func() { + Expect(ingress.Spec.Rules[1].Host).To(Equal(instance.Namespace + "-" + instance.Name + "-operations" + "." 
+ instance.Spec.Domain)) + }) + + By("setting operations tls host", func() { + Expect(ingress.Spec.TLS[1].Hosts).To(Equal([]string{instance.Namespace + "-" + instance.Name + "-operations" + "." + instance.Spec.Domain})) + }) + + By("setting backend service name", func() { + Expect(ingress.Spec.Rules[1].HTTP.Paths[0].Backend.Service.Name).To(Equal(instance.Name)) + }) + + By("setting backend service port", func() { + Expect(ingress.Spec.Rules[1].HTTP.Paths[0].Backend.Service.Port.Name).To(Equal("operations")) + }) + + By("setting grpcweb host in rules host", func() { + Expect(ingress.Spec.Rules[2].Host).To(Equal(instance.Namespace + "-" + instance.Name + "-grpcweb" + "." + instance.Spec.Domain)) + }) + + By("setting grpcweb tls host", func() { + Expect(ingress.Spec.TLS[2].Hosts).To(Equal([]string{instance.Namespace + "-" + instance.Name + "-grpcweb" + "." + instance.Spec.Domain})) + }) + + By("setting backend service name", func() { + Expect(ingress.Spec.Rules[2].HTTP.Paths[0].Backend.Service.Name).To(Equal(instance.Name)) + }) + + By("setting backend service port", func() { + Expect(ingress.Spec.Rules[2].HTTP.Paths[0].Backend.Service.Port.Name).To(Equal("grpcweb")) + }) +} diff --git a/pkg/offering/k8s/peer/override/ingress.go b/pkg/offering/k8s/peer/override/ingress.go new file mode 100644 index 00000000..357552e6 --- /dev/null +++ b/pkg/offering/k8s/peer/override/ingress.go @@ -0,0 +1,141 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override + +import ( + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + networkingv1 "k8s.io/api/networking/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (o *Override) Ingress(object v1.Object, ingress *networkingv1.Ingress, action resources.Action) error { + instance := object.(*current.IBPPeer) + + switch action { + case resources.Create: + return o.CreateIngress(instance, ingress) + case resources.Update: + return o.UpdateIngress(instance, ingress) + } + + return nil +} + +func (o *Override) CreateIngress(instance *current.IBPPeer, ingress *networkingv1.Ingress) error { + return o.CommonIngress(instance, ingress) +} + +func (o *Override) UpdateIngress(instance *current.IBPPeer, ingress *networkingv1.Ingress) error { + return o.CommonIngress(instance, ingress) +} + +func (o *Override) CommonIngress(instance *current.IBPPeer, ingress *networkingv1.Ingress) error { + ingressClass := "nginx" + if instance.Spec.Ingress.Class != "" { + ingressClass = instance.Spec.Ingress.Class + } + ingress.ObjectMeta.Annotations["kubernetes.io/ingress.class"] = ingressClass + + apihost := instance.Namespace + "-" + instance.Name + "-peer" + "." + instance.Spec.Domain + operationshost := instance.Namespace + "-" + instance.Name + "-operations" + "." 
+ instance.Spec.Domain + grpcwebhost := instance.Namespace + "-" + instance.Name + "-grpcweb" + "." + instance.Spec.Domain + + pathType := networkingv1.PathTypeImplementationSpecific + ingress.Spec = networkingv1.IngressSpec{ + Rules: []networkingv1.IngressRule{ + networkingv1.IngressRule{ + Host: apihost, + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ + networkingv1.HTTPIngressPath{ + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: instance.GetName(), + Port: networkingv1.ServiceBackendPort{ + Name: "peer-api", + }, + }, + }, + Path: "/", + PathType: &pathType, + }, + }, + }, + }, + }, + networkingv1.IngressRule{ + Host: operationshost, + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ + networkingv1.HTTPIngressPath{ + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: instance.GetName(), + Port: networkingv1.ServiceBackendPort{ + Name: "operations", + }, + }, + }, + Path: "/", + PathType: &pathType, + }, + }, + }, + }, + }, + networkingv1.IngressRule{ + Host: grpcwebhost, + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ + networkingv1.HTTPIngressPath{ + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: instance.GetName(), + Port: networkingv1.ServiceBackendPort{ + Name: "grpcweb", + }, + }, + }, + Path: "/", + PathType: &pathType, + }, + }, + }, + }, + }, + }, + TLS: []networkingv1.IngressTLS{ + networkingv1.IngressTLS{ + Hosts: []string{apihost}, + }, + networkingv1.IngressTLS{ + Hosts: []string{operationshost}, + }, + networkingv1.IngressTLS{ + Hosts: []string{grpcwebhost}, + }, + }, + } + + return nil +} diff --git a/pkg/offering/k8s/peer/override/ingress_test.go b/pkg/offering/k8s/peer/override/ingress_test.go new file mode 100644 index 00000000..0ee00237 --- /dev/null +++ b/pkg/offering/k8s/peer/override/ingress_test.go @@ -0,0 +1,193 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/k8s/peer/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + networkingv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var _ = Describe("K8s Peer Overrides", func() { + var ( + overrider *override.Override + instance *current.IBPPeer + ) + + BeforeEach(func() { + overrider = &override.Override{} + }) + + Context("Ingress", func() { + var ( + err error + ingress *networkingv1.Ingress + apihost string + operationshost string + grpcwebhost string + ) + + BeforeEach(func() { + ingress, err = util.GetIngressFromFile("../../../../../definitions/peer/ingress.yaml") + Expect(err).NotTo(HaveOccurred()) + + instance = ¤t.IBPPeer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ingress1", + Namespace: "namespace1", + }, + Spec: current.IBPPeerSpec{ + Domain: "domain1", + }, + } + + apihost = instance.Namespace + "-" + instance.Name + "-peer" + "." + instance.Spec.Domain + operationshost = instance.Namespace + "-" + instance.Name + "-operations" + "." + instance.Spec.Domain + grpcwebhost = instance.Namespace + "-" + instance.Name + "-grpcweb" + "." + instance.Spec.Domain + }) + + When("creating ingress", func() { + It("sets appropriate values", func() { + err := overrider.Ingress(instance, ingress, resources.Create) + Expect(err).NotTo(HaveOccurred()) + VerifyIngressCommonOverrides(instance, ingress, apihost, operationshost, grpcwebhost) + }) + }) + + When("creating ingress with custom class", func() { + It("sets appropriate values", func() { + instance.Spec.Ingress = current.Ingress{ + Class: "custom", + } + err := overrider.Ingress(instance, ingress, resources.Create) + Expect(err).NotTo(HaveOccurred()) + VerifyIngressCommonOverrides(instance, ingress, apihost, operationshost, grpcwebhost) + }) + }) + + When("updating ingress", func() { + It("sets appropriate values", func() { + err := overrider.Ingress(instance, ingress, resources.Update) + Expect(err).NotTo(HaveOccurred()) + VerifyIngressCommonOverrides(instance, ingress, apihost, operationshost, grpcwebhost) + }) + }) + + When("updating ingress with custom class", func() { + It("sets appropriate values", func() { + instance.Spec.Ingress = current.Ingress{ + Class: "custom", + } + err := overrider.Ingress(instance, ingress, resources.Update) + Expect(err).NotTo(HaveOccurred()) + VerifyIngressCommonOverrides(instance, ingress, apihost, operationshost, grpcwebhost) + }) + }) + }) +}) + +func VerifyIngressCommonOverrides(instance *current.IBPPeer, ingress *networkingv1.Ingress, apihost, operationshost, grpcwebhost string) { + By("setting annotation for custom ingress class", func() { + if instance.Spec.Ingress.Class != "" { + Expect(ingress.ObjectMeta.Annotations["kubernetes.io/ingress.class"]).To(Equal(instance.Spec.Ingress.Class)) + } else { + Expect(ingress.ObjectMeta.Annotations["kubernetes.io/ingress.class"]).To(Equal("nginx")) + } + }) + By("setting rules", func() { + pathType := networkingv1.PathTypeImplementationSpecific + Expect(ingress.Spec.Rules).To(HaveLen(3)) + Expect(ingress.Spec.Rules[0]).To(Equal(networkingv1.IngressRule{ + Host: apihost, + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ + networkingv1.HTTPIngressPath{ + Backend: networkingv1.IngressBackend{ + Service: 
&networkingv1.IngressServiceBackend{ + Name: instance.GetName(), + Port: networkingv1.ServiceBackendPort{ + Name: "peer-api", + }, + }, + }, + Path: "/", + PathType: &pathType, + }, + }, + }, + }, + })) + Expect(ingress.Spec.Rules[1]).To(Equal(networkingv1.IngressRule{ + Host: operationshost, + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ + networkingv1.HTTPIngressPath{ + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: instance.GetName(), + Port: networkingv1.ServiceBackendPort{ + Name: "operations", + }, + }, + }, + Path: "/", + PathType: &pathType, + }, + }, + }, + }, + })) + Expect(ingress.Spec.Rules[2]).To(Equal(networkingv1.IngressRule{ + Host: grpcwebhost, + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ + networkingv1.HTTPIngressPath{ + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: instance.GetName(), + Port: networkingv1.ServiceBackendPort{ + Name: "grpcweb", + }, + }, + }, + Path: "/", + PathType: &pathType, + }, + }, + }, + }, + })) + }) + + By("setting TLS hosts", func() { + Expect(ingress.Spec.TLS).To(HaveLen(3)) + Expect(ingress.Spec.TLS[0].Hosts).To(Equal([]string{apihost})) + Expect(ingress.Spec.TLS[1].Hosts).To(Equal([]string{operationshost})) + Expect(ingress.Spec.TLS[2].Hosts).To(Equal([]string{grpcwebhost})) + }) +} diff --git a/pkg/offering/k8s/peer/override/ingressv1beta1.go b/pkg/offering/k8s/peer/override/ingressv1beta1.go new file mode 100644 index 00000000..99155c95 --- /dev/null +++ b/pkg/offering/k8s/peer/override/ingressv1beta1.go @@ -0,0 +1,126 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package override + +import ( + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + networkingv1beta1 "k8s.io/api/networking/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func (o *Override) Ingressv1beta1(object v1.Object, ingress *networkingv1beta1.Ingress, action resources.Action) error { + instance := object.(*current.IBPPeer) + + switch action { + case resources.Create: + return o.CreateIngressv1beta1(instance, ingress) + case resources.Update: + return o.UpdateIngressv1beta1(instance, ingress) + } + + return nil +} + +func (o *Override) CreateIngressv1beta1(instance *current.IBPPeer, ingress *networkingv1beta1.Ingress) error { + return o.CommonIngressv1beta1(instance, ingress) +} + +func (o *Override) UpdateIngressv1beta1(instance *current.IBPPeer, ingress *networkingv1beta1.Ingress) error { + return o.CommonIngressv1beta1(instance, ingress) +} + +func (o *Override) CommonIngressv1beta1(instance *current.IBPPeer, ingress *networkingv1beta1.Ingress) error { + ingressClass := "nginx" + if instance.Spec.Ingress.Class != "" { + ingressClass = instance.Spec.Ingress.Class + } + ingress.ObjectMeta.Annotations["kubernetes.io/ingress.class"] = ingressClass + + apihost := instance.Namespace + "-" + instance.Name + "-peer" + "." + instance.Spec.Domain + operationshost := instance.Namespace + "-" + instance.Name + "-operations" + "." + instance.Spec.Domain + grpcwebhost := instance.Namespace + "-" + instance.Name + "-grpcweb" + "." + instance.Spec.Domain + + ingress.Spec = networkingv1beta1.IngressSpec{ + Rules: []networkingv1beta1.IngressRule{ + networkingv1beta1.IngressRule{ + Host: apihost, + IngressRuleValue: networkingv1beta1.IngressRuleValue{ + HTTP: &networkingv1beta1.HTTPIngressRuleValue{ + Paths: []networkingv1beta1.HTTPIngressPath{ + networkingv1beta1.HTTPIngressPath{ + Backend: networkingv1beta1.IngressBackend{ + ServiceName: instance.GetName(), + ServicePort: intstr.FromString("peer-api"), + }, + Path: "/", + }, + }, + }, + }, + }, + networkingv1beta1.IngressRule{ + Host: operationshost, + IngressRuleValue: networkingv1beta1.IngressRuleValue{ + HTTP: &networkingv1beta1.HTTPIngressRuleValue{ + Paths: []networkingv1beta1.HTTPIngressPath{ + networkingv1beta1.HTTPIngressPath{ + Backend: networkingv1beta1.IngressBackend{ + ServiceName: instance.GetName(), + ServicePort: intstr.FromString("operations"), + }, + Path: "/", + }, + }, + }, + }, + }, + networkingv1beta1.IngressRule{ + Host: grpcwebhost, + IngressRuleValue: networkingv1beta1.IngressRuleValue{ + HTTP: &networkingv1beta1.HTTPIngressRuleValue{ + Paths: []networkingv1beta1.HTTPIngressPath{ + networkingv1beta1.HTTPIngressPath{ + Backend: networkingv1beta1.IngressBackend{ + ServiceName: instance.GetName(), + ServicePort: intstr.FromString("grpcweb"), + }, + Path: "/", + }, + }, + }, + }, + }, + }, + TLS: []networkingv1beta1.IngressTLS{ + networkingv1beta1.IngressTLS{ + Hosts: []string{apihost}, + }, + networkingv1beta1.IngressTLS{ + Hosts: []string{operationshost}, + }, + networkingv1beta1.IngressTLS{ + Hosts: []string{grpcwebhost}, + }, + }, + } + + return nil +} diff --git a/pkg/offering/k8s/peer/override/ingressv1beta1_test.go b/pkg/offering/k8s/peer/override/ingressv1beta1_test.go new file mode 100644 index 00000000..27eaeb13 --- /dev/null +++ b/pkg/offering/k8s/peer/override/ingressv1beta1_test.go @@ -0,0 +1,178 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * 
+ * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/k8s/peer/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + networkingv1beta1 "k8s.io/api/networking/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +var _ = Describe("K8s Peer Overrides", func() { + var ( + overrider *override.Override + instance *current.IBPPeer + ) + + BeforeEach(func() { + overrider = &override.Override{} + }) + + Context("Ingress", func() { + var ( + err error + ingress *networkingv1beta1.Ingress + apihost string + operationshost string + grpcwebhost string + ) + + BeforeEach(func() { + ingress, err = util.GetIngressv1beta1FromFile("../../../../../definitions/peer/ingressv1beta1.yaml") + Expect(err).NotTo(HaveOccurred()) + + instance = ¤t.IBPPeer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ingress1", + Namespace: "namespace1", + }, + Spec: current.IBPPeerSpec{ + Domain: "domain1", + }, + } + + apihost = instance.Namespace + "-" + instance.Name + "-peer" + "." + instance.Spec.Domain + operationshost = instance.Namespace + "-" + instance.Name + "-operations" + "." + instance.Spec.Domain + grpcwebhost = instance.Namespace + "-" + instance.Name + "-grpcweb" + "." 
+ instance.Spec.Domain + }) + + When("creating ingress", func() { + It("sets appropriate values", func() { + err := overrider.Ingressv1beta1(instance, ingress, resources.Create) + Expect(err).NotTo(HaveOccurred()) + VerifyIngressCommonOverridesv1beta1(instance, ingress, apihost, operationshost, grpcwebhost) + }) + }) + + When("creating ingress with custom class", func() { + It("sets appropriate values", func() { + instance.Spec.Ingress = current.Ingress{ + Class: "custom", + } + err := overrider.Ingressv1beta1(instance, ingress, resources.Create) + Expect(err).NotTo(HaveOccurred()) + VerifyIngressCommonOverridesv1beta1(instance, ingress, apihost, operationshost, grpcwebhost) + }) + }) + + When("updating ingress", func() { + It("sets appropriate values", func() { + err := overrider.Ingressv1beta1(instance, ingress, resources.Update) + Expect(err).NotTo(HaveOccurred()) + VerifyIngressCommonOverridesv1beta1(instance, ingress, apihost, operationshost, grpcwebhost) + }) + }) + + When("updating ingress with custom class", func() { + It("sets appropriate values", func() { + instance.Spec.Ingress = current.Ingress{ + Class: "custom", + } + err := overrider.Ingressv1beta1(instance, ingress, resources.Update) + Expect(err).NotTo(HaveOccurred()) + VerifyIngressCommonOverridesv1beta1(instance, ingress, apihost, operationshost, grpcwebhost) + }) + }) + }) +}) + +func VerifyIngressCommonOverridesv1beta1(instance *current.IBPPeer, ingress *networkingv1beta1.Ingress, apihost, operationshost, grpcwebhost string) { + By("setting annotation for custom ingress class", func() { + if instance.Spec.Ingress.Class != "" { + Expect(ingress.ObjectMeta.Annotations["kubernetes.io/ingress.class"]).To(Equal(instance.Spec.Ingress.Class)) + } else { + Expect(ingress.ObjectMeta.Annotations["kubernetes.io/ingress.class"]).To(Equal("nginx")) + } + }) + By("setting rules", func() { + Expect(ingress.Spec.Rules).To(HaveLen(3)) + Expect(ingress.Spec.Rules[0]).To(Equal(networkingv1beta1.IngressRule{ + Host: apihost, + IngressRuleValue: networkingv1beta1.IngressRuleValue{ + HTTP: &networkingv1beta1.HTTPIngressRuleValue{ + Paths: []networkingv1beta1.HTTPIngressPath{ + networkingv1beta1.HTTPIngressPath{ + Backend: networkingv1beta1.IngressBackend{ + ServiceName: instance.GetName(), + ServicePort: intstr.FromString("peer-api"), + }, + Path: "/", + }, + }, + }, + }, + })) + Expect(ingress.Spec.Rules[1]).To(Equal(networkingv1beta1.IngressRule{ + Host: operationshost, + IngressRuleValue: networkingv1beta1.IngressRuleValue{ + HTTP: &networkingv1beta1.HTTPIngressRuleValue{ + Paths: []networkingv1beta1.HTTPIngressPath{ + networkingv1beta1.HTTPIngressPath{ + Backend: networkingv1beta1.IngressBackend{ + ServiceName: instance.GetName(), + ServicePort: intstr.FromString("operations"), + }, + Path: "/", + }, + }, + }, + }, + })) + Expect(ingress.Spec.Rules[2]).To(Equal(networkingv1beta1.IngressRule{ + Host: grpcwebhost, + IngressRuleValue: networkingv1beta1.IngressRuleValue{ + HTTP: &networkingv1beta1.HTTPIngressRuleValue{ + Paths: []networkingv1beta1.HTTPIngressPath{ + networkingv1beta1.HTTPIngressPath{ + Backend: networkingv1beta1.IngressBackend{ + ServiceName: instance.GetName(), + ServicePort: intstr.FromString("grpcweb"), + }, + Path: "/", + }, + }, + }, + }, + })) + }) + + By("setting TLS hosts", func() { + Expect(ingress.Spec.TLS).To(HaveLen(3)) + Expect(ingress.Spec.TLS[0].Hosts).To(Equal([]string{apihost})) + Expect(ingress.Spec.TLS[1].Hosts).To(Equal([]string{operationshost})) + 
Expect(ingress.Spec.TLS[2].Hosts).To(Equal([]string{grpcwebhost})) + }) +} diff --git a/pkg/offering/k8s/peer/override/override.go b/pkg/offering/k8s/peer/override/override.go new file mode 100644 index 00000000..388d3326 --- /dev/null +++ b/pkg/offering/k8s/peer/override/override.go @@ -0,0 +1,29 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override + +import ( + "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + basepeer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/peer/override" +) + +type Override struct { + basepeer.Override + Client controllerclient.Client +} diff --git a/pkg/offering/k8s/peer/override/override_suite_test.go b/pkg/offering/k8s/peer/override/override_suite_test.go new file mode 100644 index 00000000..fa47c9b8 --- /dev/null +++ b/pkg/offering/k8s/peer/override/override_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestOverride(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Override Suite") +} diff --git a/pkg/offering/k8s/peer/peer.go b/pkg/offering/k8s/peer/peer.go new file mode 100644 index 00000000..40ae583b --- /dev/null +++ b/pkg/offering/k8s/peer/peer.go @@ -0,0 +1,292 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package k8speer + +import ( + "context" + "fmt" + "strings" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + commoninit "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common" + controllerclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + resourcemanager "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/manager" + basepeer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/peer" + basepeeroverride "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/peer/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/k8s/peer/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/operatorerrors" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + "github.com/IBM-Blockchain/fabric-operator/version" + "github.com/pkg/errors" + networkingv1 "k8s.io/api/networking/v1" + networkingv1beta1 "k8s.io/api/networking/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + k8sclient "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var log = logf.Log.WithName("k8s_peer") + +type Override interface { + basepeer.Override + Ingress(v1.Object, *networkingv1.Ingress, resources.Action) error + Ingressv1beta1(v1.Object, *networkingv1beta1.Ingress, resources.Action) error +} + +var _ basepeer.IBPPeer = &Peer{} + +type Peer struct { + *basepeer.Peer + + IngressManager resources.Manager + Ingressv1beta1Manager resources.Manager + + Override Override +} + +func New(client controllerclient.Client, scheme *runtime.Scheme, config *config.Config) *Peer { + o := &override.Override{ + Override: basepeeroverride.Override{ + Client: client, + DefaultCouchContainerFile: config.PeerInitConfig.CouchContainerFile, + DefaultCouchInitContainerFile: config.PeerInitConfig.CouchInitContainerFile, + DefaultCCLauncherFile: config.PeerInitConfig.CCLauncherFile, + }, + } + + p := &Peer{ + Peer: basepeer.New(client, scheme, config, o), + Override: o, + } + + p.CreateManagers() + return p +} + +func (p *Peer) CreateManagers() { + resourceManager := resourcemanager.New(p.Client, p.Scheme) + p.IngressManager = resourceManager.CreateIngressManager("", p.Override.Ingress, p.GetLabels, p.Config.PeerInitConfig.IngressFile) + p.Ingressv1beta1Manager = resourceManager.CreateIngressv1beta1Manager("", p.Override.Ingressv1beta1, p.GetLabels, p.Config.PeerInitConfig.Ingressv1beta1File) +} + +func (p *Peer) ReconcileManagers(instance *current.IBPPeer, update basepeer.Update) error { + err := p.Peer.ReconcileManagers(instance, update) + if err != nil { + return err + } + + err = p.ReconcileIngressManager(instance, update.SpecUpdated()) + if err != nil { + return errors.Wrap(err, "failed Ingress reconciliation") + } + + return nil +} + +func (p *Peer) Reconcile(instance *current.IBPPeer, update basepeer.Update) (common.Result, error) { + var err error + var status *current.CRStatus + + versionSet, err := p.SetVersion(instance) + if err != nil { + return common.Result{}, errors.Wrap(err, fmt.Sprintf("failed updating CR '%s' to version '%s'", instance.Name, version.Operator)) + } + if versionSet { + log.Info("Instance version updated, requeuing request...") + return common.Result{ + Result: reconcile.Result{ + 
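+ // Requeue so the request is reconciled again now that the instance version has been set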
Requeue: true, + }, + }, nil + } + + instanceUpdated, err := p.PreReconcileChecks(instance, update) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed pre reconcile checks") + } + + // We do not have to wait for service to get the external endpoint + // thus we call UpdateExternalEndpoint in reconcile before reconcile managers + externalEndpointUpdated := p.UpdateExternalEndpoint(instance) + + hostAPI := fmt.Sprintf("%s-%s-peer.%s", instance.Namespace, instance.Name, instance.Spec.Domain) + hostOperations := fmt.Sprintf("%s-%s-operations.%s", instance.Namespace, instance.Name, instance.Spec.Domain) + hostGrpcWeb := fmt.Sprintf("%s-%s-grpcweb.%s", instance.Namespace, instance.Name, instance.Spec.Domain) + hosts := []string{hostAPI, hostOperations, hostGrpcWeb, "127.0.0.1"} + csrHostUpdated := p.CheckCSRHosts(instance, hosts) + + if instanceUpdated || externalEndpointUpdated || csrHostUpdated { + log.Info(fmt.Sprintf("Updating instance after pre reconcile checks: %t, updating external endpoint: %t, csr host updated: %t", instanceUpdated, externalEndpointUpdated, csrHostUpdated)) + err := p.Client.Patch(context.TODO(), instance, nil, controllerclient.PatchOption{ + Resilient: &controllerclient.ResilientPatch{ + Retry: 3, + Into: ¤t.IBPPeer{}, + Strategy: k8sclient.MergeFrom, + }, + }) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to update instance after prereconcile checks") + } + + log.Info("Instance updated, requeuing request...") + return common.Result{ + Result: reconcile.Result{ + Requeue: true, + }, + }, nil + } + + jobRunning, err := p.HandleMigrationJobs(k8sclient.MatchingLabels{ + "owner": instance.GetName(), + "job-name": fmt.Sprintf("%s-dbmigration", instance.GetName()), + }, instance) + if jobRunning { + log.Info(fmt.Sprintf("Requeuing request until job completes")) + return common.Result{ + Result: reconcile.Result{ + Requeue: true, + }, + }, nil + } + if err != nil { + return common.Result{}, err + } + + err = p.Initialize(instance, update) + if err != nil { + return common.Result{}, operatorerrors.Wrap(err, operatorerrors.PeerInitilizationFailed, "failed to initialize peer") + } + + if update.PeerTagUpdated() { + if err := p.ReconcileFabricPeerMigrationV1_4(instance); err != nil { + return common.Result{}, operatorerrors.Wrap(err, operatorerrors.FabricPeerMigrationFailed, "failed to migrate fabric peer versions") + } + } + + if update.MigrateToV2() { + if err := p.ReconcileFabricPeerMigrationV2_0(instance); err != nil { + return common.Result{}, operatorerrors.Wrap(err, operatorerrors.FabricPeerMigrationFailed, "failed to migrate fabric peer to version v2.0.x") + } + } + + if update.MigrateToV24() { + if err := p.ReconcileFabricPeerMigrationV2_4(instance); err != nil { + return common.Result{}, operatorerrors.Wrap(err, operatorerrors.FabricPeerMigrationFailed, "failed to migrate fabric peer to version v2.4.x") + } + } + + err = p.ReconcileManagers(instance, update) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to reconcile managers") + } + + err = p.UpdateConnectionProfile(instance) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to create connection profile") + } + + err = p.CheckStates(instance) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to check and restore state") + } + + // custom product logic can be implemented here + // No-Op atm + status, result, err := p.CustomLogic(instance, update) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to run 
custom offering logic") + } + if result != nil { + log.Info(fmt.Sprintf("Finished reconciling '%s' with Custom Logic result", instance.GetName())) + return *result, nil + } + + if update.EcertUpdated() { + log.Info("Ecert was updated") + // Request deployment restart for ecert update + err = p.Restart.ForCertUpdate(commoninit.ECERT, instance) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to update restart config") + } + } + + if update.TLSCertUpdated() { + log.Info("TLS cert was updated") + // Request deployment restart for tls cert update + err = p.Restart.ForCertUpdate(commoninit.TLS, instance) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to update restart config") + } + } + + if update.MSPUpdated() { + err = p.UpdateMSPCertificates(instance) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to update certificates passed in MSP spec") + } + } + + if err := p.HandleActions(instance, update); err != nil { + return common.Result{}, err + } + + // If configs were updated during initialization, the pods need to be restarted to pick up the + // new config changes. This should be done as the last step, specifically after ReconcileManagers, + // to allow any updates to the deployment to complete before restarting. + // Trigger the deployment restart by deleting the deployment. + if err := p.HandleRestart(instance, update); err != nil { + return common.Result{}, err + } + + return common.Result{ + Status: status, + }, nil +} + +func (p *Peer) ReconcileIngressManager(instance *current.IBPPeer, update bool) error { + if p.Config.Operator.Globals.AllowKubernetesEighteen == "true" { + // check k8s version + version, err := util.GetServerVersion() + if err != nil { + return err + } + if strings.Compare(version.Minor, "19") < 0 { // v1beta + err = p.Ingressv1beta1Manager.Reconcile(instance, update) + if err != nil { + return errors.Wrap(err, "failed Ingressv1beta1 reconciliation") + } + } else { + err = p.IngressManager.Reconcile(instance, update) + if err != nil { + return errors.Wrap(err, "failed Ingress reconciliation") + } + } + } else { + err := p.IngressManager.Reconcile(instance, update) + if err != nil { + return errors.Wrap(err, "failed Ingress reconciliation") + } + } + return nil +} diff --git a/pkg/offering/k8s/peer/peer_suite_test.go b/pkg/offering/k8s/peer/peer_suite_test.go new file mode 100644 index 00000000..ad9830b2 --- /dev/null +++ b/pkg/offering/k8s/peer/peer_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package k8speer_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +func TestPeer(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Peer Suite") +} diff --git a/pkg/offering/k8s/peer/peer_test.go b/pkg/offering/k8s/peer/peer_test.go new file mode 100644 index 00000000..b55d9120 --- /dev/null +++ b/pkg/offering/k8s/peer/peer_test.go @@ -0,0 +1,254 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package k8speer_test + +import ( + "context" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + cmocks "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/enroller" + peerinit "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer" + managermocks "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/mocks" + basepeer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/peer" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/peer/mocks" + k8speer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/k8s/peer" + "github.com/IBM-Blockchain/fabric-operator/pkg/operatorerrors" + "github.com/IBM-Blockchain/fabric-operator/version" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = Describe("K8s Peer", func() { + var ( + peer *k8speer.Peer + instance *current.IBPPeer + mockKubeClient *cmocks.Client + cfg *config.Config + + deploymentMgr *mocks.DeploymentManager + serviceMgr *managermocks.ResourceManager + pvcMgr *managermocks.ResourceManager + couchPvcMgr *managermocks.ResourceManager + configMapMgr *managermocks.ResourceManager + roleMgr *managermocks.ResourceManager + roleBindingMgr *managermocks.ResourceManager + serviceAccountMgr *managermocks.ResourceManager + ingressMgr *managermocks.ResourceManager + update *mocks.Update + certificateMgr *mocks.CertificateManager + ) + + BeforeEach(func() { + mockKubeClient = &cmocks.Client{} + update = &mocks.Update{} + + replicas := int32(1) + instance = ¤t.IBPPeer{ + TypeMeta: metav1.TypeMeta{ + Kind: "IBPPeer", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "peer1", + Namespace: "random", + }, + Spec: current.IBPPeerSpec{ + PeerExternalEndpoint: "address", + Domain: "domain", + StateDb: "couchdb", + Replicas: &replicas, + Images: ¤t.PeerImages{}, + FabricVersion: "1.4.9", + }, + Status: current.IBPPeerStatus{ + CRStatus: current.CRStatus{ + Version: version.Operator, + }, + }, + } + + mockKubeClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj client.Object) error { + switch obj.(type) { + case *current.IBPPeer: + o := obj.(*current.IBPPeer) + o.Kind = "IBPPeer" + instance = o + case *corev1.Service: + o := obj.(*corev1.Service) + o.Spec.Type = corev1.ServiceTypeNodePort + o.Spec.Ports = append(o.Spec.Ports, corev1.ServicePort{ + Name: "peer-api", + TargetPort: intstr.IntOrString{ + IntVal: 7051, + }, + NodePort: int32(7051), + }) + case *corev1.Secret: + o := obj.(*corev1.Secret) + switch types.Name { + case "ecert-" + instance.Name + "-cacerts": + o.Name = "tls-" + instance.Name + "-signcert" + o.Namespace = instance.Namespace + o.Data = map[string][]byte{"cacert-0.pem": []byte("")} + default: + o.Name = "tls-" + instance.Name + "-signcert" + o.Namespace = instance.Namespace + o.Data = map[string][]byte{"cert.pem": 
[]byte("LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNwVENDQWtxZ0F3SUJBZ0lSQU1FeVZVcDRMdlYydEFUREhlWklldDh3Q2dZSUtvWkl6ajBFQXdJd2daVXgKQ3pBSkJnTlZCQVlUQWxWVE1SY3dGUVlEVlFRSUV3NU9iM0owYUNCRFlYSnZiR2x1WVRFUE1BMEdBMVVFQnhNRwpSSFZ5YUdGdE1Rd3dDZ1lEVlFRS0V3TkpRazB4RXpBUkJnTlZCQXNUQ2tKc2IyTnJZMmhoYVc0eE9UQTNCZ05WCkJBTVRNR3BoYmpJeUxXOXlaR1Z5WlhKdmNtZGpZUzFqWVM1aGNIQnpMbkIxYldGekxtOXpMbVo1Y21VdWFXSnQKTG1OdmJUQWVGdzB5TURBeE1qSXhPREExTURCYUZ3MHpNREF4TVRreE9EQTFNREJhTUlHVk1Rc3dDUVlEVlFRRwpFd0pWVXpFWE1CVUdBMVVFQ0JNT1RtOXlkR2dnUTJGeWIyeHBibUV4RHpBTkJnTlZCQWNUQmtSMWNtaGhiVEVNCk1Bb0dBMVVFQ2hNRFNVSk5NUk13RVFZRFZRUUxFd3BDYkc5amEyTm9ZV2x1TVRrd053WURWUVFERXpCcVlXNHkKTWkxdmNtUmxjbVZ5YjNKblkyRXRZMkV1WVhCd2N5NXdkVzFoY3k1dmN5NW1lWEpsTG1saWJTNWpiMjB3V1RBVApCZ2NxaGtqT1BRSUJCZ2dxaGtqT1BRTUJCd05DQUFTR0lHUFkvZC9tQVhMejM4SlROR3F5bldpOTJXUVB6cnN0Cm5vdEFWZlh0dHZ5QWJXdTRNbWNUMEh6UnBTWjNDcGdxYUNXcTg1MUwyV09LcnZ6L0JPREpvM2t3ZHpCMUJnTlYKSFJFRWJqQnNnakJxWVc0eU1pMXZjbVJsY21WeWIzSm5ZMkV0WTJFdVlYQndjeTV3ZFcxaGN5NXZjeTVtZVhKbApMbWxpYlM1amIyMkNPR3BoYmpJeUxXOXlaR1Z5WlhKdmNtZGpZUzF2Y0dWeVlYUnBiMjV6TG1Gd2NITXVjSFZ0CllYTXViM011Wm5seVpTNXBZbTB1WTI5dE1Bb0dDQ3FHU000OUJBTUNBMGtBTUVZQ0lRQzM3Y1pkNFY2RThPQ1IKaDloQXEyK0dyR21FVTFQU0I1eHo5RkdEWThkODZRSWhBT1crM3Urb2d4bFNWNUoyR3ZYbHRaQmpXRkpvYnJxeApwVVQ4cW4yMDA1b0wKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo")} + } + } + return nil + } + instance.Status.Version = version.Operator + + deploymentMgr = &mocks.DeploymentManager{} + serviceMgr = &managermocks.ResourceManager{} + pvcMgr = &managermocks.ResourceManager{} + couchPvcMgr = &managermocks.ResourceManager{} + configMapMgr = &managermocks.ResourceManager{} + roleMgr = &managermocks.ResourceManager{} + roleBindingMgr = &managermocks.ResourceManager{} + serviceAccountMgr = &managermocks.ResourceManager{} + ingressMgr = &managermocks.ResourceManager{} + certificateMgr = &mocks.CertificateManager{} + restartMgr := &mocks.RestartManager{} + + scheme := &runtime.Scheme{} + cfg = &config.Config{ + PeerInitConfig: &peerinit.Config{ + OUFile: "../../../../defaultconfig/peer/ouconfig.yaml", + CorePeerFile: "../../../../defaultconfig/peer/core.yaml", + }, + } + initializer := &mocks.InitializeIBPPeer{} + initializer.GetInitPeerReturns(&peerinit.Peer{}, nil) + peer = &k8speer.Peer{ + Peer: &basepeer.Peer{ + Client: mockKubeClient, + Scheme: scheme, + Config: cfg, + + DeploymentManager: deploymentMgr, + ServiceManager: serviceMgr, + PVCManager: pvcMgr, + StateDBPVCManager: couchPvcMgr, + FluentDConfigMapManager: configMapMgr, + RoleManager: roleMgr, + RoleBindingManager: roleBindingMgr, + ServiceAccountManager: serviceAccountMgr, + Initializer: initializer, + CertificateManager: certificateMgr, + Restart: restartMgr, + }, + IngressManager: ingressMgr, + } + }) + + Context("Reconciles", func() { + It("returns an error if pvc manager fails to reconcile", func() { + pvcMgr.ReconcileReturns(errors.New("failed to reconcile pvc")) + _, err := peer.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed PVC reconciliation: failed to reconcile pvc")) + }) + + It("returns an error if couch pvc manager fails to reconcile", func() { + couchPvcMgr.ReconcileReturns(errors.New("failed to reconcile couch pvc")) + _, err := peer.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed CouchDB PVC reconciliation: failed to reconcile couch pvc")) + }) + + It("returns an error if service manager fails to reconcile", func() { + 
serviceMgr.ReconcileReturns(errors.New("failed to reconcile service")) + _, err := peer.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed Service reconciliation: failed to reconcile service")) + }) + + It("returns an error if deployment manager fails to reconcile", func() { + deploymentMgr.ReconcileReturns(errors.New("failed to reconcile deployment")) + _, err := peer.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed Deployment reconciliation: failed to reconcile deployment")) + }) + + It("returns an error if role manager fails to reconcile", func() { + roleMgr.ReconcileReturns(errors.New("failed to reconcile role")) + _, err := peer.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to reconcile role")) + }) + + It("returns an error if role binding manager fails to reconcile", func() { + roleBindingMgr.ReconcileReturns(errors.New("failed to reconcile role binding")) + _, err := peer.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to reconcile role binding")) + }) + + It("returns an error if service account binding manager fails to reconcile", func() { + serviceAccountMgr.ReconcileReturns(errors.New("failed to reconcile service account")) + _, err := peer.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to reconcile service account")) + }) + + It("returns an error if config map manager fails to reconcile", func() { + configMapMgr.ReconcileReturns(errors.New("failed to reconcile config map")) + _, err := peer.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed FluentD ConfigMap reconciliation: failed to reconcile config map")) + }) + + It("returns a breaking error if initialization fails", func() { + cfg.PeerInitConfig.CorePeerFile = "../../../../../defaultconfig/peer/badfile.yaml" + peer.Initializer = peerinit.New(cfg.PeerInitConfig, nil, nil, nil, nil, enroller.HSMEnrollJobTimeouts{}) + _, err := peer.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("Code: 22 - failed to initialize peer: open")) + Expect(operatorerrors.IsBreakingError(err, "msg", nil)).NotTo(HaveOccurred()) + }) + + It("does not return an error on a successful reconcile", func() { + _, err := peer.Reconcile(instance, update) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("ExternalEndpoint", func() { + It("Updates the external endpoint, When external endpoint is not defined", func() { + instance.Namespace = "namespace" + instance.Name = "name" + instance.Spec.PeerExternalEndpoint = "" + instance.Spec.Domain = "1.2.3.4" + + updated := peer.UpdateExternalEndpoint(instance) + Expect(updated).To(Equal(true)) + Expect(instance.Spec.PeerExternalEndpoint).To(Equal("namespace-name-peer.1.2.3.4:443")) + }) + }) +}) diff --git a/pkg/offering/offering.go b/pkg/offering/offering.go new file mode 100644 index 00000000..bdd75265 --- /dev/null +++ b/pkg/offering/offering.go @@ -0,0 +1,46 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package offering + +import ( + "errors" + "fmt" + "strings" +) + +type Type string + +func (t Type) String() string { + return string(t) +} + +const ( + OPENSHIFT Type = "OPENSHIFT" + K8S Type = "K8S" +) + +func GetType(oType string) (Type, error) { + switch strings.ToLower(oType) { + case "openshift": + return OPENSHIFT, nil + case "k8s": + return K8S, nil + } + return "", errors.New(fmt.Sprintf("Cluster Type %s not supported", oType)) +} diff --git a/pkg/offering/offering_suite_test.go b/pkg/offering/offering_suite_test.go new file mode 100644 index 00000000..110495f3 --- /dev/null +++ b/pkg/offering/offering_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package offering_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestOffering(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Offering Suite") +} diff --git a/pkg/offering/offering_test.go b/pkg/offering/offering_test.go new file mode 100644 index 00000000..6eb853b7 --- /dev/null +++ b/pkg/offering/offering_test.go @@ -0,0 +1,55 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package offering_test + +import ( + "github.com/IBM-Blockchain/fabric-operator/pkg/offering" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Peer configuration", func() { + Context("get type", func() { + + It("returns type OPENSHIFT", func() { + t, err := offering.GetType("OPENSHIFT") + Expect(err).To(BeNil()) + Expect(t).To(Equal(offering.OPENSHIFT)) + + t, err = offering.GetType("openshift") + Expect(err).To(BeNil()) + Expect(t).To(Equal(offering.OPENSHIFT)) + }) + + It("returns an error for unrecongized input", func() { + _, err := offering.GetType("foo") + Expect(err).NotTo(BeNil()) + }) + + It("returns type K8S for input of k8s", func() { + t, err := offering.GetType("K8S") + Expect(err).To(BeNil()) + Expect(t).To(Equal(offering.K8S)) + + t, err = offering.GetType("k8s") + Expect(err).To(BeNil()) + Expect(t).To(Equal(offering.K8S)) + }) + }) +}) diff --git a/pkg/offering/openshift/ca/ca.go b/pkg/offering/openshift/ca/ca.go new file mode 100644 index 00000000..0353acdc --- /dev/null +++ b/pkg/offering/openshift/ca/ca.go @@ -0,0 +1,202 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package openshiftca + +import ( + "context" + "fmt" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + resourcemanager "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/manager" + baseca "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/ca" + basecaoverride "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/ca/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/openshift/ca/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/operatorerrors" + "github.com/IBM-Blockchain/fabric-operator/version" + routev1 "github.com/openshift/api/route/v1" + "github.com/pkg/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var log = logf.Log.WithName("openshift_ca") + +type Override interface { + baseca.Override + CARoute(object v1.Object, route *routev1.Route, action resources.Action) error + OperationsRoute(object v1.Object, route *routev1.Route, action resources.Action) error +} + +var _ baseca.IBPCA = &CA{} + +type CA struct { + *baseca.CA + + CARouteManager resources.Manager + OperationsRouteManager resources.Manager + + Override Override +} + +func New(client k8sclient.Client, scheme *runtime.Scheme, config *config.Config) *CA { + o := &override.Override{ + Override: basecaoverride.Override{ + Client: client, + }, + } + ca := &CA{ + CA: baseca.New(client, scheme, config, o), + Override: o, + } + ca.CreateManagers() + return ca +} + +func 
(ca *CA) CreateManagers() { + resourceManager := resourcemanager.New(ca.Client, ca.Scheme) + ca.CARouteManager = resourceManager.CreateRouteManager("ca", ca.Override.CARoute, ca.GetLabels, ca.Config.CAInitConfig.RouteFile) + ca.OperationsRouteManager = resourceManager.CreateRouteManager("operations", ca.Override.OperationsRoute, ca.GetLabels, ca.Config.CAInitConfig.RouteFile) +} + +func (ca *CA) Reconcile(instance *current.IBPCA, update baseca.Update) (common.Result, error) { + + var err error + + versionSet, err := ca.SetVersion(instance) + if err != nil { + return common.Result{}, errors.Wrap(err, fmt.Sprintf("failed updating CR '%s' to version '%s'", instance.Name, version.Operator)) + } + if versionSet { + log.Info("Instance version updated, requeuing request...") + return common.Result{ + Result: reconcile.Result{ + Requeue: true, + }, + }, nil + } + + instanceUpdated, err := ca.PreReconcileChecks(instance, update) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed pre reconcile checks") + } + + if instanceUpdated { + log.Info("Updating instance after pre reconcile checks") + err := ca.Client.Patch(context.TODO(), instance, nil, k8sclient.PatchOption{ + Resilient: &k8sclient.ResilientPatch{ + Retry: 3, + Into: ¤t.IBPCA{}, + Strategy: client.MergeFrom, + }, + }) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to update instance") + } + + log.Info("Instance updated, requeuing request...") + return common.Result{ + Result: reconcile.Result{ + Requeue: true, + }, + }, nil + } + + err = ca.AddTLSCryptoIfMissing(instance, ca.GetEndpointsDNS(instance)) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to generate tls crypto") + } + + err = ca.Initialize(instance, update) + if err != nil { + return common.Result{}, operatorerrors.Wrap(err, operatorerrors.CAInitilizationFailed, "failed to initialize ca") + } + + err = ca.ReconcileManagers(instance, update) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to reconcile managers") + } + + if update.CATagUpdated() { + if err := ca.ReconcileFabricCAMigration(instance); err != nil { + return common.Result{}, operatorerrors.Wrap(err, operatorerrors.FabricCAMigrationFailed, "failed to migrate fabric ca versions") + } + } + + err = ca.UpdateConnectionProfile(instance) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to create connection profile") + } + + err = ca.CheckStates(instance) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to check and restore state") + } + + status, err := ca.CheckCertificates(instance) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to check for expiring certificates") + } + + if update.CACryptoUpdated() { + err = ca.Restart.ForTLSReenroll(instance) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to update restart config") + } + } + + err = ca.HandleActions(instance, update) + if err != nil { + return common.Result{}, err + } + + err = ca.HandleRestart(instance, update) + if err != nil { + return common.Result{}, err + } + + return common.Result{ + Status: status, + }, nil +} + +func (ca *CA) ReconcileManagers(instance *current.IBPCA, update baseca.Update) error { + err := ca.CA.ReconcileManagers(instance, update) + if err != nil { + return err + } + + err = ca.CARouteManager.Reconcile(instance, update.SpecUpdated()) + if err != nil { + return errors.Wrap(err, "failed CA Route reconciliation") + } + + err = ca.OperationsRouteManager.Reconcile(instance, 
update.SpecUpdated()) + if err != nil { + return errors.Wrap(err, "failed Operations Route reconciliation") + } + + return nil +} diff --git a/pkg/offering/openshift/ca/ca_suite_test.go b/pkg/offering/openshift/ca/ca_suite_test.go new file mode 100644 index 00000000..8cf7db26 --- /dev/null +++ b/pkg/offering/openshift/ca/ca_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package openshiftca_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestCa(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Ca Suite") +} diff --git a/pkg/offering/openshift/ca/ca_test.go b/pkg/offering/openshift/ca/ca_test.go new file mode 100644 index 00000000..2325bbce --- /dev/null +++ b/pkg/offering/openshift/ca/ca_test.go @@ -0,0 +1,267 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package openshiftca_test + +import ( + "encoding/json" + "path/filepath" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + "context" + + corev1 "k8s.io/api/core/v1" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/ca/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/deployer" + initializer "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/ca" + managermocks "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/mocks" + baseca "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/ca" + basecamocks "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/ca/mocks" + openshiftca "github.com/IBM-Blockchain/fabric-operator/pkg/offering/openshift/ca" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/openshift/ca/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/operatorerrors" + "github.com/IBM-Blockchain/fabric-operator/version" + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +var _ = Describe("Openshift CA", func() { + const ( + defaultConfigs = "../../../../defaultconfig/ca" + testdataDir = "../../../../testdata" + + testCert = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNwVENDQWtxZ0F3SUJBZ0lSQU1FeVZVcDRMdlYydEFUREhlWklldDh3Q2dZSUtvWkl6ajBFQXdJd2daVXgKQ3pBSkJnTlZCQVlUQWxWVE1SY3dGUVlEVlFRSUV3NU9iM0owYUNCRFlYSnZiR2x1WVRFUE1BMEdBMVVFQnhNRwpSSFZ5YUdGdE1Rd3dDZ1lEVlFRS0V3TkpRazB4RXpBUkJnTlZCQXNUQ2tKc2IyTnJZMmhoYVc0eE9UQTNCZ05WCkJBTVRNR3BoYmpJeUxXOXlaR1Z5WlhKdmNtZGpZUzFqWVM1aGNIQnpMbkIxYldGekxtOXpMbVo1Y21VdWFXSnQKTG1OdmJUQWVGdzB5TURBeE1qSXhPREExTURCYUZ3MHpNREF4TVRreE9EQTFNREJhTUlHVk1Rc3dDUVlEVlFRRwpFd0pWVXpFWE1CVUdBMVVFQ0JNT1RtOXlkR2dnUTJGeWIyeHBibUV4RHpBTkJnTlZCQWNUQmtSMWNtaGhiVEVNCk1Bb0dBMVVFQ2hNRFNVSk5NUk13RVFZRFZRUUxFd3BDYkc5amEyTm9ZV2x1TVRrd053WURWUVFERXpCcVlXNHkKTWkxdmNtUmxjbVZ5YjNKblkyRXRZMkV1WVhCd2N5NXdkVzFoY3k1dmN5NW1lWEpsTG1saWJTNWpiMjB3V1RBVApCZ2NxaGtqT1BRSUJCZ2dxaGtqT1BRTUJCd05DQUFTR0lHUFkvZC9tQVhMejM4SlROR3F5bldpOTJXUVB6cnN0Cm5vdEFWZlh0dHZ5QWJXdTRNbWNUMEh6UnBTWjNDcGdxYUNXcTg1MUwyV09LcnZ6L0JPREpvM2t3ZHpCMUJnTlYKSFJFRWJqQnNnakJxWVc0eU1pMXZjbVJsY21WeWIzSm5ZMkV0WTJFdVlYQndjeTV3ZFcxaGN5NXZjeTVtZVhKbApMbWxpYlM1amIyMkNPR3BoYmpJeUxXOXlaR1Z5WlhKdmNtZGpZUzF2Y0dWeVlYUnBiMjV6TG1Gd2NITXVjSFZ0CllYTXViM011Wm5seVpTNXBZbTB1WTI5dE1Bb0dDQ3FHU000OUJBTUNBMGtBTUVZQ0lRQzM3Y1pkNFY2RThPQ1IKaDloQXEyK0dyR21FVTFQU0I1eHo5RkdEWThkODZRSWhBT1crM3Urb2d4bFNWNUoyR3ZYbHRaQmpXRkpvYnJxeApwVVQ4cW4yMDA1b0wKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo" + ) + + var ( + ca *openshiftca.CA + instance *current.IBPCA + mockKubeClient *mocks.Client + + deploymentMgr *managermocks.ResourceManager + serviceMgr *managermocks.ResourceManager + pvcMgr *managermocks.ResourceManager + roleMgr *managermocks.ResourceManager + roleBindingMgr *managermocks.ResourceManager + serviceAccountMgr *managermocks.ResourceManager + caRouteManager *managermocks.ResourceManager + operationsRouteManager *managermocks.ResourceManager + + initMock *basecamocks.InitializeIBPCA + update *basecamocks.Update + certMgr *basecamocks.CertificateManager + ) + + Context("Reconciles", func() { + BeforeEach(func() { + mockKubeClient = &mocks.Client{} + update = &basecamocks.Update{} + + replicas := int32(1) + instance = ¤t.IBPCA{ + TypeMeta: metav1.TypeMeta{ + Kind: "IBPCA", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "ca1", + Namespace: "test", + }, + Spec: 
current.IBPCASpec{ + Domain: "domain", + Images: ¤t.CAImages{}, + Replicas: &replicas, + FabricVersion: "1.4.9-0", + }, + Status: current.IBPCAStatus{ + CRStatus: current.CRStatus{ + Version: version.Operator, + }, + }, + } + + mockKubeClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj client.Object) error { + switch obj.(type) { + case *corev1.Secret: + o := obj.(*corev1.Secret) + switch types.Name { + case instance.Name + "-ca-crypto": + o.Name = instance.Name + "-ca-crypto" + o.Namespace = instance.Namespace + o.Data = map[string][]byte{"tls-cert.pem": []byte(testCert)} + case instance.Name + "-tlsca-crypto": + o.Name = instance.Name + "-tlsca-crypto" + o.Namespace = instance.Namespace + o.Data = map[string][]byte{"cert.pem": []byte(testCert)} + } + } + return nil + } + deploymentMgr = &managermocks.ResourceManager{} + serviceMgr = &managermocks.ResourceManager{} + pvcMgr = &managermocks.ResourceManager{} + roleMgr = &managermocks.ResourceManager{} + roleBindingMgr = &managermocks.ResourceManager{} + serviceAccountMgr = &managermocks.ResourceManager{} + caRouteManager = &managermocks.ResourceManager{} + operationsRouteManager = &managermocks.ResourceManager{} + initMock = &basecamocks.InitializeIBPCA{} + restartMgr := &basecamocks.RestartManager{} + certMgr = &basecamocks.CertificateManager{} + + cfg := &config.Config{ + CAInitConfig: &initializer.Config{ + CADefaultConfigPath: filepath.Join(defaultConfigs, "/ca.yaml"), + CAOverrideConfigPath: filepath.Join(testdataDir, "init/override.yaml"), + TLSCADefaultConfigPath: filepath.Join(defaultConfigs, "tlsca.yaml"), + TLSCAOverrideConfigPath: filepath.Join(testdataDir, "init/override.yaml"), + SharedPath: "shared", + }, + Operator: config.Operator{ + Versions: &deployer.Versions{ + CA: map[string]deployer.VersionCA{ + "1.4.9-0": {}, + }, + }, + }, + } + + certMgr.GetSecretReturns(&corev1.Secret{}, nil) + deploymentMgr.ExistsReturns(true) + ca = &openshiftca.CA{ + CA: &baseca.CA{ + Client: mockKubeClient, + Scheme: &runtime.Scheme{}, + DeploymentManager: deploymentMgr, + ServiceManager: serviceMgr, + PVCManager: pvcMgr, + RoleManager: roleMgr, + RoleBindingManager: roleBindingMgr, + ServiceAccountManager: serviceAccountMgr, + Override: &override.Override{}, + Config: cfg, + Initializer: initMock, + Restart: restartMgr, + CertificateManager: certMgr, + }, + CARouteManager: caRouteManager, + OperationsRouteManager: operationsRouteManager, + Override: &override.Override{}, + } + }) + + It("returns a breaking error if initialization fails", func() { + initMock.HandleEnrollmentCAInitReturns(nil, errors.New("failed to init")) + _, err := ca.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("Code: 20 - failed to initialize ca: failed to init")) + Expect(operatorerrors.IsBreakingError(err, "msg", nil)).NotTo(HaveOccurred()) + }) + + It("returns an error if pvc manager fails to reconcile", func() { + pvcMgr.ReconcileReturns(errors.New("failed to reconcile pvc")) + _, err := ca.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed PVC reconciliation: failed to reconcile pvc")) + }) + + It("returns an error if service manager fails to reconcile", func() { + serviceMgr.ReconcileReturns(errors.New("failed to reconcile service")) + _, err := ca.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed Service reconciliation: failed to 
reconcile service")) + }) + + It("returns an error if role manager fails to reconcile", func() { + roleMgr.ReconcileReturns(errors.New("failed to reconcile role")) + _, err := ca.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to reconcile role")) + }) + + It("returns an error if role binding manager fails to reconcile", func() { + roleBindingMgr.ReconcileReturns(errors.New("failed to reconcile role binding")) + _, err := ca.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to reconcile role binding")) + }) + + It("returns an error if service account manager fails to reconcile", func() { + serviceAccountMgr.ReconcileReturns(errors.New("failed to reconcile service account")) + _, err := ca.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to reconcile service account")) + }) + + It("returns an error if deployment manager fails to reconcile", func() { + deploymentMgr.ReconcileReturns(errors.New("failed to reconcile deployment")) + _, err := ca.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed Deployment reconciliation: failed to reconcile deployment")) + }) + + It("returns an error if ca route manager fails to reconcile", func() { + caRouteManager.ReconcileReturns(errors.New("failed to reconcile ca route")) + _, err := ca.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed CA Route reconciliation: failed to reconcile ca route")) + }) + + It("returns an error if operations route manager fails to reconcile", func() { + operationsRouteManager.ReconcileReturns(errors.New("failed to reconcile operations route")) + _, err := ca.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed Operations Route reconciliation: failed to reconcile operations route")) + }) + + It("returns an error if restart fails", func() { + update.RestartNeededReturns(true) + mockKubeClient.PatchReturns(errors.New("patch failed")) + _, err := ca.Reconcile(instance, update) + Expect(err).Should(MatchError(ContainSubstring("patch failed"))) + }) + + It("reconciles IBPCA", func() { + _, err := ca.Reconcile(instance, update) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("AddTLSCryptoIfMissing", func() { + It("adds tls crypto", func() { + mockKubeClient.GetReturns(errors.New("fake error")) + err := ca.AddTLSCryptoIfMissing(instance, ¤t.CAEndpoints{}) + Expect(err).NotTo(HaveOccurred()) + + caOverrides := &v1.ServerConfig{} + err = json.Unmarshal(instance.Spec.ConfigOverride.CA.Raw, caOverrides) + Expect(err).NotTo(HaveOccurred()) + + Expect(caOverrides.TLS.CertFile).NotTo(Equal("")) + Expect(caOverrides.TLS.KeyFile).NotTo(Equal("")) + }) + }) +}) diff --git a/pkg/offering/openshift/ca/override/caroute.go b/pkg/offering/openshift/ca/override/caroute.go new file mode 100644 index 00000000..033903b3 --- /dev/null +++ b/pkg/offering/openshift/ca/override/caroute.go @@ -0,0 +1,67 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override + +import ( + "fmt" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/service" + routev1 "github.com/openshift/api/route/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func (o *Override) CARoute(object v1.Object, route *routev1.Route, action resources.Action) error { + instance := object.(*current.IBPCA) + switch action { + case resources.Create: + return o.CreateCARouteOverride(instance, route) + case resources.Update: + return o.UpdateCARouteOverride(instance, route) + } + + return nil +} + +func (o *Override) CreateCARouteOverride(instance *current.IBPCA, route *routev1.Route) error { + route.Name = fmt.Sprintf("%s-ca", instance.GetName()) + route.Spec.Host = instance.Namespace + "-" + instance.GetName() + "-ca" + "." + instance.Spec.Domain + weight := int32(100) + route.Spec.To = routev1.RouteTargetReference{ + Kind: "Service", + Name: service.GetName(instance.Name), + Weight: &weight, + } + + route.Spec.Port = &routev1.RoutePort{ + TargetPort: intstr.FromString("http"), + } + + route.Spec.TLS = &routev1.TLSConfig{ + Termination: routev1.TLSTerminationPassthrough, + } + + return nil +} + +func (o *Override) UpdateCARouteOverride(instance *current.IBPCA, route *routev1.Route) error { + return nil +} diff --git a/pkg/offering/openshift/ca/override/operationroute.go b/pkg/offering/openshift/ca/override/operationroute.go new file mode 100644 index 00000000..ebbb3a63 --- /dev/null +++ b/pkg/offering/openshift/ca/override/operationroute.go @@ -0,0 +1,67 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package override + +import ( + "fmt" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/service" + routev1 "github.com/openshift/api/route/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func (o *Override) OperationsRoute(object v1.Object, route *routev1.Route, action resources.Action) error { + instance := object.(*current.IBPCA) + switch action { + case resources.Create: + return o.CreateOperationsRouteOverride(instance, route) + case resources.Update: + return o.UpdateOperationsRouteOverride(instance, route) + } + + return nil +} + +func (o *Override) CreateOperationsRouteOverride(instance *current.IBPCA, route *routev1.Route) error { + route.Name = fmt.Sprintf("%s-operations", instance.GetName()) + route.Spec.Host = instance.Namespace + "-" + instance.GetName() + "-operations" + "." + instance.Spec.Domain + weight := int32(100) + route.Spec.To = routev1.RouteTargetReference{ + Kind: "Service", + Name: service.GetName(instance.Name), + Weight: &weight, + } + + route.Spec.Port = &routev1.RoutePort{ + TargetPort: intstr.FromString("operations"), + } + + route.Spec.TLS = &routev1.TLSConfig{ + Termination: routev1.TLSTerminationPassthrough, + } + + return nil +} + +func (o *Override) UpdateOperationsRouteOverride(instance *current.IBPCA, route *routev1.Route) error { + return nil +} diff --git a/pkg/offering/openshift/ca/override/override.go b/pkg/offering/openshift/ca/override/override.go new file mode 100644 index 00000000..d15aa416 --- /dev/null +++ b/pkg/offering/openshift/ca/override/override.go @@ -0,0 +1,27 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override + +import ( + baseca "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/ca/override" +) + +type Override struct { + baseca.Override +} diff --git a/pkg/offering/openshift/ca/override/override_suite_test.go b/pkg/offering/openshift/ca/override/override_suite_test.go new file mode 100644 index 00000000..fa47c9b8 --- /dev/null +++ b/pkg/offering/openshift/ca/override/override_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
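Editorial sketch: the route overrides above only act on resources.Create (Update is deliberately a no-op), filling in the Route's name, host, target service, port, and passthrough TLS. A small usage sketch with illustrative values; the unit tests that follow drive the overrides the same way:

package main

import (
	"fmt"

	current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1"
	"github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources"
	"github.com/IBM-Blockchain/fabric-operator/pkg/offering/openshift/ca/override"
	routev1 "github.com/openshift/api/route/v1"
)

func main() {
	// Illustrative instance; any IBPCA with a name, namespace, and domain behaves the same way.
	instance := &current.IBPCA{Spec: current.IBPCASpec{Domain: "example.com"}}
	instance.Name = "ca1"
	instance.Namespace = "dev"

	route := &routev1.Route{}
	o := &override.Override{}

	// Create populates the Route; the host follows <namespace>-<name>-ca.<domain>.
	if err := o.CARoute(instance, route, resources.Create); err != nil {
		panic(err)
	}
	fmt.Println(route.Spec.Host) // dev-ca1-ca.example.com

	// Update leaves an existing Route untouched.
	_ = o.CARoute(instance, route, resources.Update)
}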
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestOverride(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Override Suite") +} diff --git a/pkg/offering/openshift/ca/override/override_test.go b/pkg/offering/openshift/ca/override/override_test.go new file mode 100644 index 00000000..5ec03cc2 --- /dev/null +++ b/pkg/offering/openshift/ca/override/override_test.go @@ -0,0 +1,87 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + "fmt" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/util/intstr" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/openshift/ca/override" + routev1 "github.com/openshift/api/route/v1" +) + +var _ = Describe("Openshift CA Overrides", func() { + var ( + route *routev1.Route + overrider *override.Override + instance *current.IBPCA + ) + + BeforeEach(func() { + route = &routev1.Route{} + overrider = &override.Override{} + + instance = ¤t.IBPCA{ + Spec: current.IBPCASpec{ + Domain: "test-domain", + }, + } + instance.Name = "route1" + instance.Namespace = "testNS" + }) + + Context("CA Route", func() { + When("creating a new CA Route", func() { + It("appropriately overrides the respective values", func() { + err := overrider.CARoute(instance, route, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + Expect(route.Name).To(Equal(fmt.Sprintf("%s-ca", instance.Name))) + Expect(route.Spec.Host).To(Equal("testNS-route1-ca.test-domain")) + Expect(route.Spec.To.Kind).To(Equal("Service")) + Expect(route.Spec.To.Name).To(Equal(instance.Name)) + Expect(*route.Spec.To.Weight).To(Equal(int32(100))) + Expect(route.Spec.Port.TargetPort).To(Equal(intstr.FromString("http"))) + Expect(route.Spec.TLS.Termination).To(Equal(routev1.TLSTerminationPassthrough)) + }) + }) + }) + + Context("Operation Route", func() { + When("creating a new Operation Route", func() { + It("appropriately overrides the respective values", func() { + err := overrider.OperationsRoute(instance, route, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + Expect(route.Name).To(Equal(fmt.Sprintf("%s-operations", instance.Name))) + Expect(route.Spec.Host).To(Equal("testNS-route1-operations.test-domain")) + Expect(route.Spec.To.Kind).To(Equal("Service")) + Expect(route.Spec.To.Name).To(Equal(instance.Name)) + Expect(*route.Spec.To.Weight).To(Equal(int32(100))) + Expect(route.Spec.Port.TargetPort).To(Equal(intstr.FromString("operations"))) + Expect(route.Spec.TLS.Termination).To(Equal(routev1.TLSTerminationPassthrough)) + }) + }) + }) +}) diff --git 
a/pkg/offering/openshift/console/console.go b/pkg/offering/openshift/console/console.go new file mode 100644 index 00000000..108e5fc1 --- /dev/null +++ b/pkg/offering/openshift/console/console.go @@ -0,0 +1,177 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package openshiftconsole + +import ( + "context" + "fmt" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + resourcemanager "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/manager" + baseconsole "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/console" + baseconsoleoverride "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/console/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/openshift/console/override" + "github.com/IBM-Blockchain/fabric-operator/version" + routev1 "github.com/openshift/api/route/v1" + "github.com/pkg/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +const ( + defaultRoute = "./definitions/console/route.yaml" +) + +var log = logf.Log.WithName("openshift_console") + +type Override interface { + baseconsole.Override + ConsoleRoute(object v1.Object, route *routev1.Route, action resources.Action) error + ProxyRoute(object v1.Object, route *routev1.Route, action resources.Action) error +} + +var _ baseconsole.IBPConsole = &Console{} + +type Console struct { + *baseconsole.Console + + RouteManager resources.Manager + ProxyRouteManager resources.Manager + + Override Override +} + +func New(client k8sclient.Client, scheme *runtime.Scheme, config *config.Config) *Console { + o := &override.Override{ + Override: baseconsoleoverride.Override{}, + } + + console := &Console{ + Console: baseconsole.New(client, scheme, config, o), + Override: o, + } + console.CreateManagers() + return console +} + +func (c *Console) CreateManagers() { + resourceManager := resourcemanager.New(c.Client, c.Scheme) + c.RouteManager = resourceManager.CreateRouteManager("console", c.Override.ConsoleRoute, c.GetLabels, c.Config.ConsoleInitConfig.RouteFile) + c.ProxyRouteManager = resourceManager.CreateRouteManager("console-proxy", c.Override.ProxyRoute, c.GetLabels, c.Config.ConsoleInitConfig.RouteFile) +} + +func (c *Console) Reconcile(instance *current.IBPConsole, update baseconsole.Update) (common.Result, error) { + + var err error + + versionSet, err := c.SetVersion(instance) + if err != nil { + return common.Result{}, errors.Wrap(err, fmt.Sprintf("failed updating CR '%s' to 
version '%s'", instance.Name, version.Operator)) + } + if versionSet { + log.Info("Instance version updated, requeuing request...") + return common.Result{ + Result: reconcile.Result{ + Requeue: true, + }, + }, nil + } + + instanceUpdated, err := c.PreReconcileChecks(instance) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed pre reconcile checks") + } + + if instanceUpdated { + log.Info("Updating instance after pre reconcile checks") + err = c.Client.Patch(context.TODO(), instance, nil, k8sclient.PatchOption{ + Resilient: &k8sclient.ResilientPatch{ + Retry: 3, + Into: ¤t.IBPConsole{}, + Strategy: client.MergeFrom, + }, + }) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to update instance") + } + + log.Info("Instance updated, requeuing request...") + return common.Result{ + Result: reconcile.Result{ + Requeue: true, + }, + }, nil + } + + err = c.ReconcileManagers(instance, update.SpecUpdated()) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to reconcile managers") + } + + err = c.CheckStates(instance, update.SpecUpdated()) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to check and restore state") + } + + err = c.CheckForConfigMapUpdates(instance, update) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to check for config map updates") + } + + err = c.HandleActions(instance, update) + if err != nil { + return common.Result{}, err + } + + if err := c.HandleRestart(instance, update); err != nil { + return common.Result{}, err + } + + return common.Result{}, nil +} + +func (c *Console) ReconcileManagers(instance *current.IBPConsole, update bool) error { + err := c.Console.ReconcileManagers(instance, update) + if err != nil { + return err + } + + err = c.RouteManager.Reconcile(instance, update) + if err != nil { + return errors.Wrap(err, "failed Console Route reconciliation") + } + + err = c.ProxyRouteManager.Reconcile(instance, update) + if err != nil { + return errors.Wrap(err, "failed Proxy Route reconciliation") + } + + err = c.NetworkPolicyReconcile(instance) + if err != nil { + return errors.Wrap(err, "failed Network Policy reconciliation") + } + return nil +} diff --git a/pkg/offering/openshift/console/console_suite_test.go b/pkg/offering/openshift/console/console_suite_test.go new file mode 100644 index 00000000..75e0e62c --- /dev/null +++ b/pkg/offering/openshift/console/console_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package openshiftconsole_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +func TestConsole(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Console Suite") +} diff --git a/pkg/offering/openshift/console/console_test.go b/pkg/offering/openshift/console/console_test.go new file mode 100644 index 00000000..c99e70c9 --- /dev/null +++ b/pkg/offering/openshift/console/console_test.go @@ -0,0 +1,138 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package openshiftconsole_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + managermocks "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/mocks" + baseconsole "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/console" + baseconsolemocks "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/console/mocks" + openshiftconsole "github.com/IBM-Blockchain/fabric-operator/pkg/offering/openshift/console" + "github.com/IBM-Blockchain/fabric-operator/version" + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/runtime" +) + +var _ = Describe("Openshift Console", func() { + + var ( + console *openshiftconsole.Console + instance *current.IBPConsole + mockKubeClient *mocks.Client + + consoleRouteManager *managermocks.ResourceManager + proxyRouteManager *managermocks.ResourceManager + deploymentMgr *managermocks.ResourceManager + update *baseconsolemocks.Update + ) + + Context("Reconciles", func() { + BeforeEach(func() { + mockKubeClient = &mocks.Client{} + update = &baseconsolemocks.Update{} + instance = ¤t.IBPConsole{ + Spec: current.IBPConsoleSpec{ + License: current.License{ + Accept: true, + }, + Email: "xyz@ibm.com", + PasswordSecretName: "secret", + ImagePullSecrets: []string{"testsecret"}, + RegistryURL: "ghcr.io/ibm-blockchain/", + ServiceAccountName: "test", + NetworkInfo: ¤t.NetworkInfo{ + Domain: "test-domain", + }, + Versions: ¤t.Versions{}, + ConnectionString: "http://fake.url", + }, + } + instance.Kind = "IBPConsole" + instance.Name = "route1" + instance.Namespace = "testNS" + instance.Status.Version = version.Operator + + deploymentMgr = &managermocks.ResourceManager{} + serviceMgr := &managermocks.ResourceManager{} + pvcMgr := &managermocks.ResourceManager{} + configMapMgr := &managermocks.ResourceManager{} + consoleConfigMapMgr := &managermocks.ResourceManager{} + deployerConfigMapMgr := &managermocks.ResourceManager{} + roleMgr := &managermocks.ResourceManager{} + roleBindingMgr := &managermocks.ResourceManager{} + serviceAccountMgr := &managermocks.ResourceManager{} + + consoleRouteManager = &managermocks.ResourceManager{} + proxyRouteManager = &managermocks.ResourceManager{} + + deploymentMgr.ExistsReturns(true) + console = &openshiftconsole.Console{ + Console: &baseconsole.Console{ + 
Client: mockKubeClient, + Scheme: &runtime.Scheme{}, + Config: &config.Config{}, + + DeploymentManager: deploymentMgr, + ServiceManager: serviceMgr, + PVCManager: pvcMgr, + ConfigMapManager: configMapMgr, + ConsoleConfigMapManager: consoleConfigMapMgr, + DeployerConfigMapManager: deployerConfigMapMgr, + RoleManager: roleMgr, + RoleBindingManager: roleBindingMgr, + ServiceAccountManager: serviceAccountMgr, + Restart: &baseconsolemocks.RestartManager{}, + }, + RouteManager: consoleRouteManager, + ProxyRouteManager: proxyRouteManager, + } + }) + + It("returns an error if console route manager fails to reconcile", func() { + consoleRouteManager.ReconcileReturns(errors.New("failed to reconcile ca route")) + _, err := console.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed Console Route reconciliation: failed to reconcile ca route")) + }) + + It("returns an error if proxy route manager fails to reconcile", func() { + proxyRouteManager.ReconcileReturns(errors.New("failed to reconcile operations route")) + _, err := console.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed Proxy Route reconciliation: failed to reconcile operations route")) + }) + + It("restarts pods by deleting deployment", func() { + update.RestartNeededReturns(true) + _, err := console.Reconcile(instance, update) + Expect(err).NotTo(HaveOccurred()) + Expect(mockKubeClient.PatchCallCount()).To(Equal(1)) + }) + + It("reconciles IBPConsole", func() { + _, err := console.Reconcile(instance, update) + Expect(err).NotTo(HaveOccurred()) + }) + }) +}) diff --git a/pkg/offering/openshift/console/override/consolecm.go b/pkg/offering/openshift/console/override/consolecm.go new file mode 100644 index 00000000..9b8ba85f --- /dev/null +++ b/pkg/offering/openshift/console/override/consolecm.go @@ -0,0 +1,78 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package override + +import ( + "errors" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + consolev1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/console/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + baseconsole "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/console/override" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/yaml" +) + +func (o *Override) ConsoleCM(object v1.Object, cm *corev1.ConfigMap, action resources.Action, options map[string]interface{}) error { + instance := object.(*current.IBPConsole) + switch action { + case resources.Create: + return o.CreateConsoleCM(instance, cm, options) + case resources.Update: + return o.UpdateConsoleCM(instance, cm, options) + } + + return nil +} + +func (o *Override) CreateConsoleCM(instance *current.IBPConsole, cm *corev1.ConfigMap, options map[string]interface{}) error { + data := cm.Data["settings.yaml"] + + config := &consolev1.ConsoleSettingsConfig{} + err := yaml.Unmarshal([]byte(data), config) + if err != nil { + return err + } + + if instance.Spec.NetworkInfo == nil || instance.Spec.NetworkInfo.Domain == "" { + return errors.New("domain not provided") + } + + err = baseconsole.CommonConsoleCM(instance, config, options) + if err != nil { + return err + } + + config.Infrastructure = baseconsole.OPENSHIFT + // config.ProxyTLSUrl = fmt.Sprintf("https://%s-%s-console.%s:443", instance.GetNamespace(), instance.GetName(), instance.Spec.NetworkInfo.Domain) + + bytes, err := yaml.Marshal(config) + if err != nil { + return err + } + + if cm.Data == nil { + cm.Data = map[string]string{} + } + + cm.Data["settings.yaml"] = string(bytes) + + return nil +} diff --git a/pkg/offering/openshift/console/override/consolecm_test.go b/pkg/offering/openshift/console/override/consolecm_test.go new file mode 100644 index 00000000..0f34c990 --- /dev/null +++ b/pkg/offering/openshift/console/override/consolecm_test.go @@ -0,0 +1,161 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/yaml" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + consolev1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/console/v1" + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/console/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/openshift/console/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" +) + +var _ = Describe("Openshift Console Config Map Overrides", func() { + var ( + overrider *override.Override + instance *current.IBPConsole + cm *corev1.ConfigMap + ) + + BeforeEach(func() { + var err error + + cm, err = util.GetConfigMapFromFile("../../../../../definitions/console/console-configmap.yaml") + Expect(err).NotTo(HaveOccurred()) + + overrider = &override.Override{} + instance = ¤t.IBPConsole{ + ObjectMeta: metav1.ObjectMeta{ + Name: "consolecm", + Namespace: "consolecmns", + }, + Spec: current.IBPConsoleSpec{ + Email: "test@ibm.com", + AuthScheme: "scheme1", + ConfigtxlatorURL: "configtx.ibm.com", + DeployerURL: "deployer.ibm.com", + DeployerTimeout: 5, + Components: "component1", + Sessions: "session1", + System: "system1", + SystemChannel: "channel1", + FeatureFlags: &consolev1.FeatureFlags{ + CreateChannelEnabled: true, + }, + ClusterData: &consolev1.IBPConsoleClusterData{ + Zones: []string{"zone1"}, + Type: "type1", + }, + NetworkInfo: ¤t.NetworkInfo{ + Domain: "ibm.com", + }, + }, + } + }) + + Context("create", func() { + It("returns an error if domain not provided", func() { + instance.Spec.NetworkInfo.Domain = "" + err := overrider.ConsoleCM(instance, cm, resources.Create, nil) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("domain not provided")) + }) + + It("overrides values based on spec", func() { + err := overrider.ConsoleCM(instance, cm, resources.Create, nil) + Expect(err).NotTo(HaveOccurred()) + + config := &v1.ConsoleSettingsConfig{} + err = yaml.Unmarshal([]byte(cm.Data["settings.yaml"]), config) + Expect(err).NotTo(HaveOccurred()) + + CommonConsoleCMOverrides(instance, config) + }) + }) + + Context("update", func() { + It("overrides values based on spec", func() { + err := overrider.ConsoleCM(instance, cm, resources.Update, nil) + Expect(err).NotTo(HaveOccurred()) + + config := &v1.ConsoleSettingsConfig{} + err = yaml.Unmarshal([]byte(cm.Data["settings.yaml"]), config) + Expect(err).NotTo(HaveOccurred()) + + CommonConsoleCMOverrides(instance, config) + }) + }) +}) + +func CommonConsoleCMOverrides(instance *current.IBPConsole, config *v1.ConsoleSettingsConfig) { + By("setting email", func() { + Expect(config.Email).To(Equal(instance.Spec.Email)) + }) + + By("setting auth scheme", func() { + Expect(config.AuthScheme).To(Equal(instance.Spec.AuthScheme)) + }) + + By("setting configtxlator URL", func() { + Expect(config.Configtxlator).To(Equal(instance.Spec.ConfigtxlatorURL)) + }) + + By("setting Deployer URL", func() { + Expect(config.DeployerURL).To(Equal(instance.Spec.DeployerURL)) + }) + + By("setting Deployer timeout", func() { + Expect(config.DeployerTimeout).To(Equal(instance.Spec.DeployerTimeout)) + }) + + By("setting components", func() { + Expect(config.DBCustomNames.Components).To(Equal(instance.Spec.Components)) + }) + + By("setting sessions", func() { + Expect(config.DBCustomNames.Sessions).To(Equal(instance.Spec.Sessions)) + }) + + By("setting system", func() { + 
Expect(config.DBCustomNames.System).To(Equal(instance.Spec.System)) + }) + + By("setting system channel", func() { + Expect(config.SystemChannelID).To(Equal(instance.Spec.SystemChannel)) + }) + + By("setting Proxy TLS Reqs", func() { + Expect(config.ProxyTLSReqs).To(Equal("always")) + }) + + By("setting feature flags", func() { + Expect(config.Featureflags).To(Equal(instance.Spec.FeatureFlags)) + }) + + By("setting cluster data", func() { + Expect(config.ClusterData).To(Equal(instance.Spec.ClusterData)) + }) +} diff --git a/pkg/offering/openshift/console/override/consoleroute.go b/pkg/offering/openshift/console/override/consoleroute.go new file mode 100644 index 00000000..d5e5df55 --- /dev/null +++ b/pkg/offering/openshift/console/override/consoleroute.go @@ -0,0 +1,67 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override + +import ( + "fmt" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/service" + routev1 "github.com/openshift/api/route/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func (o *Override) ConsoleRoute(object v1.Object, route *routev1.Route, action resources.Action) error { + instance := object.(*current.IBPConsole) + switch action { + case resources.Create: + return o.CreateConsoleRoute(instance, route) + case resources.Update: + return o.UpdateConsoleRoute(instance, route) + } + + return nil +} + +func (o *Override) CreateConsoleRoute(instance *current.IBPConsole, route *routev1.Route) error { + route.Name = fmt.Sprintf("%s-console", instance.GetName()) + route.Spec.Host = instance.Namespace + "-" + instance.GetName() + "-console" + "." + instance.Spec.NetworkInfo.Domain + weight := int32(100) + route.Spec.To = routev1.RouteTargetReference{ + Kind: "Service", + Name: service.GetName(instance.Name), + Weight: &weight, + } + + route.Spec.Port = &routev1.RoutePort{ + TargetPort: intstr.FromString("optools"), + } + + route.Spec.TLS = &routev1.TLSConfig{ + Termination: routev1.TLSTerminationPassthrough, + } + + return nil +} + +func (o *Override) UpdateConsoleRoute(instance *current.IBPConsole, route *routev1.Route) error { + return nil +} diff --git a/pkg/offering/openshift/console/override/consoleroute_test.go b/pkg/offering/openshift/console/override/consoleroute_test.go new file mode 100644 index 00000000..15158346 --- /dev/null +++ b/pkg/offering/openshift/console/override/consoleroute_test.go @@ -0,0 +1,70 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
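Editorial sketch: CreateConsoleCM above follows a read-modify-write pattern on the ConfigMap's settings.yaml document (the deployer override further below does the same with its own deployer.Config type). A minimal, self-contained restatement of that round-trip, using only packages imported by the files in this patch:

package main

import (
	"fmt"

	consolev1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/console/v1"
	baseconsole "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/console/override"
	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/yaml"
)

func main() {
	// A ConfigMap carrying a (here empty) settings.yaml document.
	cm := &corev1.ConfigMap{Data: map[string]string{"settings.yaml": ""}}

	// Unmarshal, apply the offering-specific field, and write the document back.
	config := &consolev1.ConsoleSettingsConfig{}
	if err := yaml.Unmarshal([]byte(cm.Data["settings.yaml"]), config); err != nil {
		panic(err)
	}
	config.Infrastructure = baseconsole.OPENSHIFT
	bytes, err := yaml.Marshal(config)
	if err != nil {
		panic(err)
	}
	cm.Data["settings.yaml"] = string(bytes)

	fmt.Println(cm.Data["settings.yaml"])
}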
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + "fmt" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/util/intstr" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/openshift/console/override" + routev1 "github.com/openshift/api/route/v1" +) + +var _ = Describe("Openshift Console Route Overrides", func() { + var ( + route *routev1.Route + overrider *override.Override + instance *current.IBPConsole + ) + + BeforeEach(func() { + route = &routev1.Route{} + overrider = &override.Override{} + + instance = ¤t.IBPConsole{ + Spec: current.IBPConsoleSpec{ + NetworkInfo: ¤t.NetworkInfo{ + Domain: "test-domain", + }, + }, + } + instance.Name = "route1" + instance.Namespace = "testNS" + }) + + Context("create", func() { + It("appropriately overrides the respective values", func() { + err := overrider.ConsoleRoute(instance, route, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + Expect(route.Name).To(Equal(fmt.Sprintf("%s-console", instance.Name))) + Expect(route.Spec.Host).To(Equal("testNS-route1-console.test-domain")) + Expect(route.Spec.To.Kind).To(Equal("Service")) + Expect(route.Spec.To.Name).To(Equal(instance.Name)) + Expect(*route.Spec.To.Weight).To(Equal(int32(100))) + Expect(route.Spec.Port.TargetPort).To(Equal(intstr.FromString("optools"))) + Expect(route.Spec.TLS.Termination).To(Equal(routev1.TLSTerminationPassthrough)) + }) + }) +}) diff --git a/pkg/offering/openshift/console/override/deployercm.go b/pkg/offering/openshift/console/override/deployercm.go new file mode 100644 index 00000000..6fc73dfd --- /dev/null +++ b/pkg/offering/openshift/console/override/deployercm.go @@ -0,0 +1,79 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package override + +import ( + "errors" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/deployer" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering" + baseconsole "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/console/override" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/yaml" +) + +func (o *Override) DeployerCM(object v1.Object, cm *corev1.ConfigMap, action resources.Action, options map[string]interface{}) error { + instance := object.(*current.IBPConsole) + switch action { + case resources.Create: + return o.CreateDeployerCM(instance, cm, options) + case resources.Update: + return o.UpdateDeployerCM(instance, cm, options) + } + + return nil +} + +func (o *Override) CreateDeployerCM(instance *current.IBPConsole, cm *corev1.ConfigMap, options map[string]interface{}) error { + data := cm.Data["settings.yaml"] + + config := &deployer.Config{} + err := yaml.Unmarshal([]byte(data), config) + if err != nil { + return err + } + + if instance.Spec.NetworkInfo == nil || instance.Spec.NetworkInfo.Domain == "" { + return errors.New("domain not provided") + } + + err = baseconsole.CommonDeployerCM(instance, config, options) + if err != nil { + return err + } + + config.ClusterType = offering.OPENSHIFT.String() + config.ServiceConfig.Type = corev1.ServiceTypeClusterIP + + bytes, err := yaml.Marshal(config) + if err != nil { + return err + } + + if cm.Data == nil { + cm.Data = map[string]string{} + } + + cm.Data["settings.yaml"] = string(bytes) + + return nil +} diff --git a/pkg/offering/openshift/console/override/deployercm_test.go b/pkg/offering/openshift/console/override/deployercm_test.go new file mode 100644 index 00000000..3bbd2ee6 --- /dev/null +++ b/pkg/offering/openshift/console/override/deployercm_test.go @@ -0,0 +1,254 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/yaml" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/deployer" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/openshift/console/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" +) + +var _ = Describe("Openshift Console Deployer Config Map Overrides", func() { + var ( + overrider *override.Override + instance *current.IBPConsole + cm *corev1.ConfigMap + ) + + BeforeEach(func() { + var err error + overrider = &override.Override{} + instance = ¤t.IBPConsole{ + Spec: current.IBPConsoleSpec{ + ImagePullSecrets: []string{"pullsecret"}, + ConnectionString: "connectionString1", + Storage: ¤t.ConsoleStorage{ + Console: ¤t.StorageSpec{ + Class: "sc1", + }, + }, + NetworkInfo: ¤t.NetworkInfo{ + Domain: "domain1", + }, + Versions: ¤t.Versions{ + CA: map[string]current.VersionCA{ + "1.4.6-1": current.VersionCA{ + Default: true, + Version: "1.4.6-1", + Image: current.CAImages{ + CAInitImage: "ca-init-image", + CAInitTag: "1.4.6", + CAImage: "ca-image", + CATag: "1.4.6", + }, + }, + }, + Peer: map[string]current.VersionPeer{ + "1.4.6-1": current.VersionPeer{ + Default: true, + Version: "1.4.6-1", + Image: current.PeerImages{ + PeerInitImage: "peer-init-image", + PeerInitTag: "1.4.6", + PeerImage: "peer-image", + PeerTag: "1.4.6", + DindImage: "dind-iamge", + DindTag: "1.4.6", + GRPCWebImage: "grpcweb-image", + GRPCWebTag: "1.4.6", + FluentdImage: "fluentd-image", + FluentdTag: "1.4.6", + CouchDBImage: "couchdb-image", + CouchDBTag: "1.4.6", + CCLauncherImage: "cclauncer-image", + CCLauncherTag: "1.4.6", + }, + }, + }, + Orderer: map[string]current.VersionOrderer{ + "1.4.6-1": current.VersionOrderer{ + Default: true, + Version: "1.4.6-1", + Image: current.OrdererImages{ + OrdererInitImage: "orderer-init-image", + OrdererInitTag: "1.4.6", + OrdererImage: "orderer-image", + OrdererTag: "1.4.6", + GRPCWebImage: "grpcweb-image", + GRPCWebTag: "1.4.6", + }, + }, + }, + }, + CRN: ¤t.CRN{ + CName: "cname", + CType: "ctype", + Location: "location1", + Servicename: "Servicename1", + Version: "version1", + AccountID: "id123", + }, + Deployer: ¤t.Deployer{ + ConnectionString: "connectionstring2", + }, + }, + } + cm, err = util.GetConfigMapFromFile("../../../../../testdata/deployercm/deployer-configmap.yaml") + Expect(err).NotTo(HaveOccurred()) + }) + + Context("create", func() { + It("return an error if no image pull secret provided", func() { + instance.Spec.ImagePullSecrets = nil + err := overrider.DeployerCM(instance, cm, resources.Create, nil) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("no image pull secret provided")) + }) + + It("return an error if no domain provided", func() { + instance.Spec.NetworkInfo.Domain = "" + err := overrider.DeployerCM(instance, cm, resources.Create, nil) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("domain not provided")) + }) + + It("overrides values based on spec", func() { + err := overrider.DeployerCM(instance, cm, resources.Create, nil) + Expect(err).NotTo(HaveOccurred()) + + config := &deployer.Config{} + + err = yaml.Unmarshal([]byte(cm.Data["settings.yaml"]), config) + Expect(err).NotTo(HaveOccurred()) + + By("setting cluster type", func() { + Expect(config.ClusterType).To(Equal(offering.OPENSHIFT.String())) + }) + + By("setting service 
type", func() { + Expect(config.ServiceConfig.Type).To(Equal(corev1.ServiceTypeClusterIP)) + }) + + By("setting domain", func() { + Expect(config.Domain).To(Equal(instance.Spec.NetworkInfo.Domain)) + }) + + By("setting image pull secret", func() { + Expect(config.ImagePullSecrets).To(Equal(instance.Spec.ImagePullSecrets)) + }) + + By("setting connection string", func() { + Expect(config.Database.ConnectionURL).To(Equal(instance.Spec.Deployer.ConnectionString)) + }) + + By("setting versions", func() { + expectedVersions := ¤t.Versions{ + CA: map[string]current.VersionCA{ + "1.4.6-1": current.VersionCA{ + Default: true, + Version: "1.4.6-1", + Image: current.CAImages{ + CAInitImage: "ca-init-image", + CAInitTag: "1.4.6", + CAImage: "ca-image", + CATag: "1.4.6", + }, + }, + }, + Peer: map[string]current.VersionPeer{ + "1.4.6-1": current.VersionPeer{ + Default: true, + Version: "1.4.6-1", + Image: current.PeerImages{ + PeerInitImage: "peer-init-image", + PeerInitTag: "1.4.6", + PeerImage: "peer-image", + PeerTag: "1.4.6", + DindImage: "dind-iamge", + DindTag: "1.4.6", + GRPCWebImage: "grpcweb-image", + GRPCWebTag: "1.4.6", + FluentdImage: "fluentd-image", + FluentdTag: "1.4.6", + CouchDBImage: "couchdb-image", + CouchDBTag: "1.4.6", + CCLauncherImage: "cclauncer-image", + CCLauncherTag: "1.4.6", + }, + }, + }, + Orderer: map[string]current.VersionOrderer{ + "1.4.6-1": current.VersionOrderer{ + Default: true, + Version: "1.4.6-1", + Image: current.OrdererImages{ + OrdererInitImage: "orderer-init-image", + OrdererInitTag: "1.4.6", + OrdererImage: "orderer-image", + OrdererTag: "1.4.6", + GRPCWebImage: "grpcweb-image", + GRPCWebTag: "1.4.6", + }, + }, + }, + } + + typeConvertedVersions := ¤t.Versions{} + util.ConvertSpec(config.Versions, typeConvertedVersions) + Expect(typeConvertedVersions).To(Equal(expectedVersions)) + }) + + By("setting storage class name", func() { + Expect(config.Defaults.Storage.CA.CA.Class).To(Equal(instance.Spec.Storage.Console.Class)) + Expect(config.Defaults.Storage.Peer.Peer.Class).To(Equal(instance.Spec.Storage.Console.Class)) + Expect(config.Defaults.Storage.Peer.StateDB.Class).To(Equal(instance.Spec.Storage.Console.Class)) + Expect(config.Defaults.Storage.Orderer.Orderer.Class).To(Equal(instance.Spec.Storage.Console.Class)) + }) + + By("setting CRN", func() { + crn := ¤t.CRN{ + CName: instance.Spec.CRN.CName, + CType: instance.Spec.CRN.CType, + Location: instance.Spec.CRN.Location, + Servicename: instance.Spec.CRN.Servicename, + Version: instance.Spec.CRN.Version, + AccountID: instance.Spec.CRN.AccountID, + } + Expect(config.CRN).To(Equal(crn)) + }) + }) + }) + + Context("update", func() { + It("return an error if no image pull secret provided", func() { + instance.Spec.ImagePullSecrets = nil + err := overrider.DeployerCM(instance, cm, resources.Update, nil) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("no image pull secret provided")) + }) + }) +}) diff --git a/pkg/offering/openshift/console/override/envcm.go b/pkg/offering/openshift/console/override/envcm.go new file mode 100644 index 00000000..b8000baf --- /dev/null +++ b/pkg/offering/openshift/console/override/envcm.go @@ -0,0 +1,52 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override + +import ( + "fmt" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (o *Override) CM(object v1.Object, cm *corev1.ConfigMap, action resources.Action, options map[string]interface{}) error { + instance := object.(*current.IBPConsole) + switch action { + case resources.Create: + return o.CreateCM(instance, cm) + case resources.Update: + return o.UpdateCM(instance, cm) + } + + return nil +} + +func (o *Override) CreateCM(instance *current.IBPConsole, cm *corev1.ConfigMap) error { + cm.Data["HOST_URL"] = fmt.Sprintf("https://%s-%s-console.%s:443", instance.GetNamespace(), instance.GetName(), instance.Spec.NetworkInfo.Domain) + cm.Data["HOST_URL_WS"] = fmt.Sprintf("https://%s-%s-console.%s:443", instance.GetNamespace(), instance.GetName(), instance.Spec.NetworkInfo.Domain) + + err := o.CommonCM(instance, cm) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/offering/openshift/console/override/envcm_test.go b/pkg/offering/openshift/console/override/envcm_test.go new file mode 100644 index 00000000..108b90de --- /dev/null +++ b/pkg/offering/openshift/console/override/envcm_test.go @@ -0,0 +1,95 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + "fmt" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/openshift/console/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/util" +) + +var _ = Describe("Openshift Console Env Config Map Overrides", func() { + var ( + overrider *override.Override + instance *current.IBPConsole + cm *corev1.ConfigMap + ) + + BeforeEach(func() { + var err error + overrider = &override.Override{} + instance = ¤t.IBPConsole{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name1", + Namespace: "ns1", + }, + Spec: current.IBPConsoleSpec{ + ConnectionString: "connection_string", + TLSSecretName: "tls_secret_name", + System: "system1", + NetworkInfo: ¤t.NetworkInfo{ + Domain: "test.domain", + ConsolePort: 31010, + ProxyPort: 31011, + }, + }, + } + cm, err = util.GetConfigMapFromFile("../../../../../definitions/console/configmap.yaml") + Expect(err).NotTo(HaveOccurred()) + }) + + Context("create", func() { + It("appropriately overrides the respective values for env config map", func() { + err := overrider.CM(instance, cm, resources.Create, nil) + Expect(err).NotTo(HaveOccurred()) + + By("setting HOST_URL", func() { + Expect(cm.Data["HOST_URL"]).To(Equal(fmt.Sprintf("https://%s-%s-console.%s:443", instance.GetNamespace(), instance.GetName(), instance.Spec.NetworkInfo.Domain))) + }) + + By("setting HOST_URL", func() { + Expect(cm.Data["HOST_URL_WS"]).To(Equal(fmt.Sprintf("https://%s-%s-console.%s:443", instance.GetNamespace(), instance.GetName(), instance.Spec.NetworkInfo.Domain))) + }) + + By("setting DB_CONNECTION_STRING", func() { + Expect(cm.Data["DB_CONNECTION_STRING"]).To(Equal(instance.Spec.ConnectionString)) + }) + + By("setting DB_SYSTEM", func() { + Expect(cm.Data["DB_SYSTEM"]).To(Equal(instance.Spec.System)) + }) + + By("setting KEY_FILE_PATH", func() { + Expect(cm.Data["KEY_FILE_PATH"]).To(Equal("/certs/tls/tls.key")) + }) + + By("setting PEM_FILE_PATH", func() { + Expect(cm.Data["PEM_FILE_PATH"]).To(Equal("/certs/tls/tls.crt")) + }) + }) + }) +}) diff --git a/pkg/offering/openshift/console/override/override.go b/pkg/offering/openshift/console/override/override.go new file mode 100644 index 00000000..039fed76 --- /dev/null +++ b/pkg/offering/openshift/console/override/override.go @@ -0,0 +1,27 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package override + +import ( + baseconsole "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/console/override" +) + +type Override struct { + baseconsole.Override +} diff --git a/pkg/offering/openshift/console/override/override_suite_test.go b/pkg/offering/openshift/console/override/override_suite_test.go new file mode 100644 index 00000000..fa47c9b8 --- /dev/null +++ b/pkg/offering/openshift/console/override/override_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestOverride(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Override Suite") +} diff --git a/pkg/offering/openshift/console/override/proxyroute.go b/pkg/offering/openshift/console/override/proxyroute.go new file mode 100644 index 00000000..5b6f8f2b --- /dev/null +++ b/pkg/offering/openshift/console/override/proxyroute.go @@ -0,0 +1,67 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override + +import ( + "fmt" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/service" + routev1 "github.com/openshift/api/route/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func (o *Override) ProxyRoute(object v1.Object, route *routev1.Route, action resources.Action) error { + instance := object.(*current.IBPConsole) + switch action { + case resources.Create: + return o.CreateProxyRoute(instance, route) + case resources.Update: + return o.UpdateProxyRoute(instance, route) + } + + return nil +} + +func (o *Override) CreateProxyRoute(instance *current.IBPConsole, route *routev1.Route) error { + route.Name = fmt.Sprintf("%s-proxy", instance.GetName()) + route.Spec.Host = instance.Namespace + "-" + instance.GetName() + "-proxy" + "." 
+ instance.Spec.NetworkInfo.Domain + weight := int32(100) + route.Spec.To = routev1.RouteTargetReference{ + Kind: "Service", + Name: service.GetName(instance.Name), + Weight: &weight, + } + + route.Spec.Port = &routev1.RoutePort{ + TargetPort: intstr.FromString("optools"), + } + + route.Spec.TLS = &routev1.TLSConfig{ + Termination: routev1.TLSTerminationPassthrough, + } + + return nil +} + +func (o *Override) UpdateProxyRoute(instance *current.IBPConsole, route *routev1.Route) error { + return nil +} diff --git a/pkg/offering/openshift/console/override/proxyroute_test.go b/pkg/offering/openshift/console/override/proxyroute_test.go new file mode 100644 index 00000000..b4542408 --- /dev/null +++ b/pkg/offering/openshift/console/override/proxyroute_test.go @@ -0,0 +1,70 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + "fmt" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/util/intstr" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/openshift/console/override" + routev1 "github.com/openshift/api/route/v1" +) + +var _ = Describe("Openshift Proxy Route Overrides", func() { + var ( + route *routev1.Route + overrider *override.Override + instance *current.IBPConsole + ) + + BeforeEach(func() { + route = &routev1.Route{} + overrider = &override.Override{} + + instance = ¤t.IBPConsole{ + Spec: current.IBPConsoleSpec{ + NetworkInfo: ¤t.NetworkInfo{ + Domain: "test-domain", + }, + }, + } + instance.Name = "route1" + instance.Namespace = "testNS" + }) + + Context("create", func() { + It("appropriately overrides the respective values", func() { + err := overrider.ProxyRoute(instance, route, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + Expect(route.Name).To(Equal(fmt.Sprintf("%s-proxy", instance.Name))) + Expect(route.Spec.Host).To(Equal("testNS-route1-proxy.test-domain")) + Expect(route.Spec.To.Kind).To(Equal("Service")) + Expect(route.Spec.To.Name).To(Equal("route1")) + Expect(*route.Spec.To.Weight).To(Equal(int32(100))) + Expect(route.Spec.Port.TargetPort).To(Equal(intstr.FromString("optools"))) + Expect(route.Spec.TLS.Termination).To(Equal(routev1.TLSTerminationPassthrough)) + }) + }) +}) diff --git a/pkg/offering/openshift/orderer/node.go b/pkg/offering/openshift/orderer/node.go new file mode 100644 index 00000000..3e6c9b9b --- /dev/null +++ b/pkg/offering/openshift/orderer/node.go @@ -0,0 +1,259 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package openshiftorderer + +import ( + "context" + "fmt" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + commoninit "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common" + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + resourcemanager "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/manager" + baseorderer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/openshift/orderer/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/operatorerrors" + "github.com/IBM-Blockchain/fabric-operator/version" + routev1 "github.com/openshift/api/route/v1" + "github.com/pkg/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +type Override interface { + baseorderer.Override + OrdererRoute(object v1.Object, route *routev1.Route, action resources.Action) error + OperationsRoute(object v1.Object, route *routev1.Route, action resources.Action) error + AdminRoute(object v1.Object, route *routev1.Route, action resources.Action) error + OrdererGRPCRoute(object v1.Object, route *routev1.Route, action resources.Action) error +} + +var _ baseorderer.IBPOrderer = &Node{} + +type Node struct { + *baseorderer.Node + + RouteManager resources.Manager + OperationsRouteManager resources.Manager + AdminRouteManager resources.Manager + GRPCRouteManager resources.Manager + + Override Override +} + +func NewNode(basenode *baseorderer.Node) *Node { + node := &Node{ + Node: basenode, + Override: &override.Override{}, + } + node.CreateManagers() + return node +} + +func (n *Node) CreateManagers() { + resourceManager := resourcemanager.New(n.Node.Client, n.Node.Scheme) + n.RouteManager = resourceManager.CreateRouteManager("", n.Override.OrdererRoute, n.GetLabels, n.Config.OrdererInitConfig.RouteFile) + n.OperationsRouteManager = resourceManager.CreateRouteManager("", n.Override.OperationsRoute, n.GetLabels, n.Config.OrdererInitConfig.RouteFile) + n.AdminRouteManager = resourceManager.CreateRouteManager("", n.Override.AdminRoute, n.GetLabels, n.Config.OrdererInitConfig.RouteFile) + n.GRPCRouteManager = resourceManager.CreateRouteManager("", n.Override.OrdererGRPCRoute, n.GetLabels, n.Config.OrdererInitConfig.RouteFile) +} + +func (n *Node) Reconcile(instance *current.IBPOrderer, update baseorderer.Update) (common.Result, error) { + var err error + var status *current.CRStatus + + versionSet, err := n.SetVersion(instance) + if err != nil { + return common.Result{}, errors.Wrap(err, fmt.Sprintf("failed updating CR '%s' to version '%s'", instance.Name, version.Operator)) + } + if versionSet { + log.Info("Instance version updated, requeuing request...") + return common.Result{ + Result: reconcile.Result{ + Requeue: true, + }, + OverrideUpdateStatus: true, + }, nil + } + + instanceUpdated, err := 
n.PreReconcileChecks(instance, update)
+	if err != nil {
+		return common.Result{}, errors.Wrap(err, "failed pre reconcile checks")
+	}
+	externalEndpointUpdated := n.UpdateExternalEndpoint(instance)
+
+	if instanceUpdated || externalEndpointUpdated {
+		log.Info(fmt.Sprintf("Updating instance after pre reconcile checks: %t, updating external endpoint: %t",
+			instanceUpdated, externalEndpointUpdated))
+
+		err = n.Client.Patch(context.TODO(), instance, nil, k8sclient.PatchOption{
+			Resilient: &k8sclient.ResilientPatch{
+				Retry:    3,
+				Into:     &current.IBPOrderer{},
+				Strategy: client.MergeFrom,
+			},
+		})
+		if err != nil {
+			return common.Result{}, errors.Wrap(err, "failed to update instance")
+		}
+
+		log.Info("Instance updated during reconcile checks, request will be requeued...")
+		return common.Result{
+			Result: reconcile.Result{
+				Requeue: true,
+			},
+			Status: &current.CRStatus{
+				Type:    current.Initializing,
+				Reason:  "Setting default values for either zone, region, and/or external endpoint",
+				Message: "Operator has updated spec with defaults as part of initialization",
+			},
+			OverrideUpdateStatus: true,
+		}, nil
+	}
+
+	err = n.Initialize(instance, update)
+	if err != nil {
+		return common.Result{}, operatorerrors.Wrap(err, operatorerrors.OrdererInitilizationFailed, "failed to initialize orderer node")
+	}
+
+	if update.OrdererTagUpdated() {
+		if err := n.ReconcileFabricOrdererMigration(instance); err != nil {
+			return common.Result{}, operatorerrors.Wrap(err, operatorerrors.FabricOrdererMigrationFailed, "failed to migrate fabric orderer versions")
+		}
+	}
+
+	if update.MigrateToV2() {
+		if err := n.FabricOrdererMigrationV2_0(instance); err != nil {
+			return common.Result{}, operatorerrors.Wrap(err, operatorerrors.FabricOrdererMigrationFailed, "failed to migrate fabric orderer to version v2.x")
+		}
+	}
+
+	if update.MigrateToV24() {
+		if err := n.FabricOrdererMigrationV2_4(instance); err != nil {
+			return common.Result{}, operatorerrors.Wrap(err, operatorerrors.FabricOrdererMigrationFailed, "failed to migrate fabric orderer to version v2.4.x")
+		}
+	}
+
+	err = n.ReconcileManagers(instance, update, nil)
+	if err != nil {
+		return common.Result{}, errors.Wrap(err, "failed to reconcile managers")
+	}
+
+	err = n.UpdateConnectionProfile(instance)
+	if err != nil {
+		return common.Result{}, errors.Wrap(err, "failed to create connection profile")
+	}
+
+	err = n.CheckStates(instance)
+	if err != nil {
+		return common.Result{}, errors.Wrap(err, "failed to check and restore state")
+	}
+
+	err = n.UpdateParentStatus(instance)
+	if err != nil {
+		return common.Result{}, errors.Wrap(err, "failed to update parent's status")
+	}
+
+	status, result, err := n.CustomLogic(instance, update)
+	if err != nil {
+		return common.Result{}, errors.Wrap(err, "failed to run custom offering logic")
+	}
+	if result != nil {
+		log.Info(fmt.Sprintf("Finished reconciling '%s' with Custom Logic result", instance.GetName()))
+		return *result, nil
+	}
+
+	if update.EcertUpdated() {
+		log.Info("Ecert was updated")
+		// Request deployment restart for ecert update
+		err = n.Restart.ForCertUpdate(commoninit.ECERT, instance)
+		if err != nil {
+			return common.Result{}, errors.Wrap(err, "failed to update restart config")
+		}
+	}
+
+	if update.TLSCertUpdated() {
+		log.Info("TLS cert was updated")
+		// Request deployment restart for TLS cert update
+		err = n.Restart.ForCertUpdate(commoninit.TLS, instance)
+		if err != nil {
+			return common.Result{}, errors.Wrap(err, "failed to update restart config")
+		}
+	}
+
+	if update.MSPUpdated() {
+		err = n.UpdateMSPCertificates(instance)
+		if err != nil {
+			return common.Result{}, errors.Wrap(err, "failed to update certificates passed in MSP spec")
+		}
+	}
+
+	if err := n.HandleActions(instance, update); err != nil {
+		return common.Result{}, err
+	}
+
+	if err := n.HandleRestart(instance, update); err != nil {
+		return common.Result{}, err
+	}
+
+	return common.Result{
+		Status: status,
+	}, nil
+}
+
+func (n *Node) ReconcileManagers(instance *current.IBPOrderer, updated baseorderer.Update, genesisBlock []byte) error {
+	var err error
+
+	err = n.Node.ReconcileManagers(instance, updated, genesisBlock)
+	if err != nil {
+		return err
+	}
+
+	update := updated.SpecUpdated()
+
+	err = n.RouteManager.Reconcile(instance, update)
+	if err != nil {
+		return errors.Wrap(err, "failed Orderer Route reconciliation")
+	}
+
+	err = n.OperationsRouteManager.Reconcile(instance, update)
+	if err != nil {
+		return errors.Wrap(err, "failed Operations Route reconciliation")
+	}
+
+	err = n.GRPCRouteManager.Reconcile(instance, update)
+	if err != nil {
+		return errors.Wrap(err, "failed Orderer GRPC Route reconciliation")
+	}
+
+	currentVer := version.String(instance.Spec.FabricVersion)
+	if currentVer.EqualWithoutTag(version.V2_4_1) || currentVer.GreaterThan(version.V2_4_1) {
+		err = n.AdminRouteManager.Reconcile(instance, update)
+		if err != nil {
+			return errors.Wrap(err, "failed Orderer Admin Route reconciliation")
+		}
+	}
+
+	return nil
+}
diff --git a/pkg/offering/openshift/orderer/orderer.go b/pkg/offering/openshift/orderer/orderer.go
new file mode 100644
index 00000000..2492c939
--- /dev/null
+++ b/pkg/offering/openshift/orderer/orderer.go
@@ -0,0 +1,156 @@
+/*
+ * Copyright contributors to the Hyperledger Fabric Operator project
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package openshiftorderer + +import ( + "fmt" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + baseorderer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer" + baseordereroverride "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/openshift/orderer/override" + "github.com/IBM-Blockchain/fabric-operator/version" + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +const ( + defaultRoute = "./definitions/orderer/route.yaml" +) + +var log = logf.Log.WithName("openshift_orderer") + +var _ baseorderer.IBPOrderer = &Orderer{} + +type Orderer struct { + *baseorderer.Orderer +} + +func New(client k8sclient.Client, scheme *runtime.Scheme, config *config.Config) *Orderer { + o := &override.Override{ + Override: baseordereroverride.Override{ + Client: client, + Config: config, + }, + } + + orderer := &Orderer{ + Orderer: baseorderer.New(client, scheme, config, o), + } + + return orderer +} + +func (o *Orderer) Reconcile(instance *current.IBPOrderer, update baseorderer.Update) (common.Result, error) { + + if instance.Spec.NodeNumber == nil { + versionSet, err := o.SetVersion(instance) + if err != nil { + return common.Result{}, errors.Wrap(err, fmt.Sprintf("failed updating CR '%s' to version '%s'", instance.Name, version.Operator)) + } + if versionSet { + log.Info("Instance version updated, requeuing request...") + return common.Result{ + Result: reconcile.Result{ + Requeue: true, + }, + }, nil + } + + if instance.Status.Status == "" || instance.Status.Status == current.False || (instance.Status.Version != "" && version.String(instance.Status.Version).GreaterThan(version.V210)) { + instanceUpdated, err := o.PreReconcileChecks(instance, update) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed pre reconcile checks") + } + + if instanceUpdated { + log.Info("Instance updated, requeuing request...") + return common.Result{ + Result: reconcile.Result{ + Requeue: true, + }, + OverrideUpdateStatus: true, + }, nil + } + } + } + + // TODO: Major rehaul is needed of versioning and migration strategy. 
Need a way to + // migrate as first step to get CR spec in appropriate state to avoid versioning checks + // like below and above + if (instance.Status.Version == "" && instance.Status.Status == current.True) || (instance.Status.Version != "" && version.String(instance.Status.Version).Equal(version.V210)) { + if instance.Spec.NodeNumber == nil { + number := 1 + instance.Spec.NodeNumber = &number + } + } + + if instance.Spec.NodeNumber == nil { + result, err := o.ReconcileCluster(instance, update, o.AddHostPortToProfile) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to reconcile cluster") + } + return result, nil + } + + result, err := o.ReconcileNode(instance, update) + if err != nil { + return common.Result{}, errors.Wrap(err, "failed to reconcile node") + } + + return result, nil +} + +func (o *Orderer) ReconcileNode(instance *current.IBPOrderer, update baseorderer.Update) (common.Result, error) { + var err error + + hostAPI := fmt.Sprintf("%s-%s-orderer.%s", instance.Namespace, instance.Name, instance.Spec.Domain) + hostOperations := fmt.Sprintf("%s-%s-operations.%s", instance.Namespace, instance.Name, instance.Spec.Domain) + hostGrpc := fmt.Sprintf("%s-%s-grpcweb.%s", instance.Namespace, instance.Name, instance.Spec.Domain) + hosts := []string{} + currentVer := version.String(instance.Spec.FabricVersion) + if currentVer.EqualWithoutTag(version.V2_4_1) || currentVer.GreaterThan(version.V2_4_1) { + hostAdmin := fmt.Sprintf("%s-%s-admin.%s", instance.Namespace, instance.Name, instance.Spec.Domain) + hosts = append(hosts, hostAPI, hostOperations, hostGrpc, hostAdmin, "127.0.0.1") + } else { + hosts = append(hosts, hostAPI, hostOperations, hostGrpc, "127.0.0.1") + } + + o.CheckCSRHosts(instance, hosts) + + log.Info(fmt.Sprintf("Reconciling Orderer node %s", instance.GetName())) + + openshiftnode := NewNode(baseorderer.NewNode(o.Client, o.Scheme, o.Config, instance.GetName(), o.RenewCertTimers, o.RestartManager)) + + if !instance.Spec.IsUsingChannelLess() && instance.Spec.GenesisBlock == "" && !(instance.Spec.IsPrecreateOrderer()) { + return common.Result{}, fmt.Errorf("Genesis block not provided for orderer node: %s", instance.GetName()) + } + + result, err := openshiftnode.Reconcile(instance, update) + if err != nil { + return common.Result{}, err + } + + return result, nil +} diff --git a/pkg/offering/openshift/orderer/orderer_suite_test.go b/pkg/offering/openshift/orderer/orderer_suite_test.go new file mode 100644 index 00000000..443e5799 --- /dev/null +++ b/pkg/offering/openshift/orderer/orderer_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package openshiftorderer_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +func TestOrderer(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Orderer Suite") +} diff --git a/pkg/offering/openshift/orderer/orderer_test.go b/pkg/offering/openshift/orderer/orderer_test.go new file mode 100644 index 00000000..250b4e37 --- /dev/null +++ b/pkg/offering/openshift/orderer/orderer_test.go @@ -0,0 +1,88 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package openshiftorderer_test + +import ( + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + cmocks "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + ordererinit "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer" + baseorderer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer/mocks" + openshiftorderer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/openshift/orderer" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/runtime" +) + +var _ = Describe("Openshift Orderer", func() { + var ( + orderer *openshiftorderer.Orderer + instance *current.IBPOrderer + mockKubeClient *cmocks.Client + cfg *config.Config + update *mocks.Update + ) + + Context("Reconciles", func() { + BeforeEach(func() { + precreate := false + mockKubeClient = &cmocks.Client{} + update = &mocks.Update{} + instance = ¤t.IBPOrderer{ + Spec: current.IBPOrdererSpec{ + License: current.License{ + Accept: true, + }, + OrdererType: "etcdraft", + SystemChannelName: "testchainid", + OrgName: "orderermsp", + MSPID: "orderermsp", + ImagePullSecrets: []string{"regcred"}, + ClusterSecret: []*current.SecretSpec{}, + Secret: ¤t.SecretSpec{}, + IsPrecreate: &precreate, + GenesisBlock: "GenesisBlock", + Images: ¤t.OrdererImages{}, + }, + } + instance.Kind = "IBPOrderer" + + cfg = &config.Config{ + OrdererInitConfig: &ordererinit.Config{ + ConfigTxFile: "../../../../defaultconfig/orderer/configtx.yaml", + OUFile: "../../../../defaultconfig/orderer/ouconfig.yaml", + }, + } + + orderer = &openshiftorderer.Orderer{ + Orderer: &baseorderer.Orderer{ + Client: mockKubeClient, + Scheme: &runtime.Scheme{}, + Config: cfg, + }, + } + }) + + PIt("reconciles openshift orderer", func() { + _, err := orderer.ReconcileNode(instance, update) + Expect(err).NotTo(HaveOccurred()) + }) + }) +}) diff --git a/pkg/offering/openshift/orderer/override/adminroute.go b/pkg/offering/openshift/orderer/override/adminroute.go new file mode 100644 index 00000000..5df784d6 --- /dev/null +++ b/pkg/offering/openshift/orderer/override/adminroute.go @@ -0,0 +1,72 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in 
compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override + +import ( + "fmt" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/service" + "github.com/IBM-Blockchain/fabric-operator/version" + routev1 "github.com/openshift/api/route/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func (o *Override) AdminRoute(object v1.Object, route *routev1.Route, action resources.Action) error { + instance := object.(*current.IBPOrderer) + currentVer := version.String(instance.Spec.FabricVersion) + if !(currentVer.EqualWithoutTag(version.V2_4_1) || currentVer.GreaterThan(version.V2_4_1)) { + return nil + } + switch action { + case resources.Create: + return o.CreateAdminRouteOverride(instance, route) + case resources.Update: + return o.UpdateAdminRouteOverride(instance, route) + } + + return nil +} + +func (o *Override) CreateAdminRouteOverride(instance *current.IBPOrderer, route *routev1.Route) error { + route.Name = fmt.Sprintf("%s-admin", instance.GetName()) + route.Spec.Host = instance.Namespace + "-" + instance.GetName() + "-admin" + "." + instance.Spec.Domain + weight := int32(100) + route.Spec.To = routev1.RouteTargetReference{ + Kind: "Service", + Name: service.GetName(instance.Name), + Weight: &weight, + } + + route.Spec.Port = &routev1.RoutePort{ + TargetPort: intstr.FromString("orderer-admin"), + } + + route.Spec.TLS = &routev1.TLSConfig{ + Termination: routev1.TLSTerminationPassthrough, + } + + return nil +} + +func (o *Override) UpdateAdminRouteOverride(instance *current.IBPOrderer, route *routev1.Route) error { + return nil +} diff --git a/pkg/offering/openshift/orderer/override/grpcroute.go b/pkg/offering/openshift/orderer/override/grpcroute.go new file mode 100644 index 00000000..affe2107 --- /dev/null +++ b/pkg/offering/openshift/orderer/override/grpcroute.go @@ -0,0 +1,67 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package override + +import ( + "fmt" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/service" + routev1 "github.com/openshift/api/route/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func (o *Override) OrdererGRPCRoute(object v1.Object, route *routev1.Route, action resources.Action) error { + instance := object.(*current.IBPOrderer) + switch action { + case resources.Create: + return o.CreateOrdererGRPCRouteOverride(instance, route) + case resources.Update: + return o.UpdateOrdererGRPCRouteOverride(instance, route) + } + + return nil +} + +func (o *Override) CreateOrdererGRPCRouteOverride(instance *current.IBPOrderer, route *routev1.Route) error { + route.Name = fmt.Sprintf("%s-grpcweb", instance.GetName()) + route.Spec.Host = instance.Namespace + "-" + instance.GetName() + "-grpcweb" + "." + instance.Spec.Domain + weight := int32(100) + route.Spec.To = routev1.RouteTargetReference{ + Kind: "Service", + Name: service.GetName(instance.Name), + Weight: &weight, + } + + route.Spec.Port = &routev1.RoutePort{ + TargetPort: intstr.FromString("grpcweb"), + } + + route.Spec.TLS = &routev1.TLSConfig{ + Termination: routev1.TLSTerminationPassthrough, + } + + return nil +} + +func (o *Override) UpdateOrdererGRPCRouteOverride(instance *current.IBPOrderer, route *routev1.Route) error { + return nil +} diff --git a/pkg/offering/openshift/orderer/override/operationroute.go b/pkg/offering/openshift/orderer/override/operationroute.go new file mode 100644 index 00000000..2649725f --- /dev/null +++ b/pkg/offering/openshift/orderer/override/operationroute.go @@ -0,0 +1,67 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override + +import ( + "fmt" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/service" + routev1 "github.com/openshift/api/route/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func (o *Override) OperationsRoute(object v1.Object, route *routev1.Route, action resources.Action) error { + instance := object.(*current.IBPOrderer) + switch action { + case resources.Create: + return o.CreateOperationsRouteOverride(instance, route) + case resources.Update: + return o.UpdateOperationsRouteOverride(instance, route) + } + + return nil +} + +func (o *Override) CreateOperationsRouteOverride(instance *current.IBPOrderer, route *routev1.Route) error { + route.Name = fmt.Sprintf("%s-operations", instance.GetName()) + route.Spec.Host = instance.Namespace + "-" + instance.GetName() + "-operations" + "." 
+ instance.Spec.Domain + weight := int32(100) + route.Spec.To = routev1.RouteTargetReference{ + Kind: "Service", + Name: service.GetName(instance.Name), + Weight: &weight, + } + + route.Spec.Port = &routev1.RoutePort{ + TargetPort: intstr.FromString("operations"), + } + + route.Spec.TLS = &routev1.TLSConfig{ + Termination: routev1.TLSTerminationPassthrough, + } + + return nil +} + +func (o *Override) UpdateOperationsRouteOverride(instance *current.IBPOrderer, route *routev1.Route) error { + return nil +} diff --git a/pkg/offering/openshift/orderer/override/ordererroute.go b/pkg/offering/openshift/orderer/override/ordererroute.go new file mode 100644 index 00000000..1f6ba04b --- /dev/null +++ b/pkg/offering/openshift/orderer/override/ordererroute.go @@ -0,0 +1,67 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override + +import ( + "fmt" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/service" + routev1 "github.com/openshift/api/route/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func (o *Override) OrdererRoute(object v1.Object, route *routev1.Route, action resources.Action) error { + instance := object.(*current.IBPOrderer) + switch action { + case resources.Create: + return o.CreateOrdererRouteOverride(instance, route) + case resources.Update: + return o.UpdateOrdererRouteOverride(instance, route) + } + + return nil +} + +func (o *Override) CreateOrdererRouteOverride(instance *current.IBPOrderer, route *routev1.Route) error { + route.Name = fmt.Sprintf("%s-orderer", instance.GetName()) + route.Spec.Host = instance.Namespace + "-" + instance.GetName() + "-orderer" + "." + instance.Spec.Domain + weight := int32(100) + route.Spec.To = routev1.RouteTargetReference{ + Kind: "Service", + Name: service.GetName(instance.Name), + Weight: &weight, + } + + route.Spec.Port = &routev1.RoutePort{ + TargetPort: intstr.FromString("orderer-grpc"), + } + + route.Spec.TLS = &routev1.TLSConfig{ + Termination: routev1.TLSTerminationPassthrough, + } + + return nil +} + +func (o *Override) UpdateOrdererRouteOverride(instance *current.IBPOrderer, route *routev1.Route) error { + return nil +} diff --git a/pkg/offering/openshift/orderer/override/override.go b/pkg/offering/openshift/orderer/override/override.go new file mode 100644 index 00000000..f7e2d1ee --- /dev/null +++ b/pkg/offering/openshift/orderer/override/override.go @@ -0,0 +1,27 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override + +import ( + baseorderer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/orderer/override" +) + +type Override struct { + baseorderer.Override +} diff --git a/pkg/offering/openshift/orderer/override/override_suite_test.go b/pkg/offering/openshift/orderer/override/override_suite_test.go new file mode 100644 index 00000000..fa47c9b8 --- /dev/null +++ b/pkg/offering/openshift/orderer/override/override_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestOverride(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Override Suite") +} diff --git a/pkg/offering/openshift/orderer/override/override_test.go b/pkg/offering/openshift/orderer/override/override_test.go new file mode 100644 index 00000000..cf295316 --- /dev/null +++ b/pkg/offering/openshift/orderer/override/override_test.go @@ -0,0 +1,104 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override_test + +import ( + "fmt" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/util/intstr" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/openshift/orderer/override" + routev1 "github.com/openshift/api/route/v1" +) + +var _ = Describe("Openshift Orderer Overrides", func() { + var ( + route *routev1.Route + overrider *override.Override + instance *current.IBPOrderer + ) + + BeforeEach(func() { + route = &routev1.Route{} + overrider = &override.Override{} + + instance = ¤t.IBPOrderer{ + Spec: current.IBPOrdererSpec{ + Domain: "test-domain", + }, + } + instance.Name = "route1" + instance.Namespace = "testNS" + }) + + Context("Orderer Route", func() { + When("creating a new Orderer Route", func() { + It("appropriately overrides the respective values", func() { + err := overrider.OrdererRoute(instance, route, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + Expect(route.Name).To(Equal(fmt.Sprintf("%s-orderer", instance.Name))) + Expect(route.Spec.Host).To(Equal("testNS-route1-orderer.test-domain")) + Expect(route.Spec.To.Kind).To(Equal("Service")) + Expect(route.Spec.To.Name).To(Equal(instance.Name)) + Expect(*route.Spec.To.Weight).To(Equal(int32(100))) + Expect(route.Spec.Port.TargetPort).To(Equal(intstr.FromString("orderer-grpc"))) + Expect(route.Spec.TLS.Termination).To(Equal(routev1.TLSTerminationPassthrough)) + }) + }) + }) + + Context("Operation Route", func() { + When("creating a new Operation Route", func() { + It("appropriately overrides the respective values", func() { + err := overrider.OperationsRoute(instance, route, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + Expect(route.Name).To(Equal(fmt.Sprintf("%s-operations", instance.Name))) + Expect(route.Spec.Host).To(Equal("testNS-route1-operations.test-domain")) + Expect(route.Spec.To.Kind).To(Equal("Service")) + Expect(route.Spec.To.Name).To(Equal(instance.Name)) + Expect(*route.Spec.To.Weight).To(Equal(int32(100))) + Expect(route.Spec.Port.TargetPort).To(Equal(intstr.FromString("operations"))) + Expect(route.Spec.TLS.Termination).To(Equal(routev1.TLSTerminationPassthrough)) + }) + }) + }) + + Context("GPRC Route", func() { + When("creating a new GRPC Route", func() { + It("appropriately overrides the respective values", func() { + err := overrider.OrdererGRPCRoute(instance, route, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + Expect(route.Name).To(Equal(fmt.Sprintf("%s-grpcweb", instance.Name))) + Expect(route.Spec.Host).To(Equal("testNS-route1-grpcweb.test-domain")) + Expect(route.Spec.To.Kind).To(Equal("Service")) + Expect(route.Spec.To.Name).To(Equal(instance.Name)) + Expect(*route.Spec.To.Weight).To(Equal(int32(100))) + Expect(route.Spec.Port.TargetPort).To(Equal(intstr.FromString("grpcweb"))) + Expect(route.Spec.TLS.Termination).To(Equal(routev1.TLSTerminationPassthrough)) + }) + }) + }) +}) diff --git a/pkg/offering/openshift/peer/override/grpcroute.go b/pkg/offering/openshift/peer/override/grpcroute.go new file mode 100644 index 00000000..6f46eff1 --- /dev/null +++ b/pkg/offering/openshift/peer/override/grpcroute.go @@ -0,0 +1,67 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override + +import ( + "fmt" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/service" + routev1 "github.com/openshift/api/route/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func (o *Override) PeerGRPCRoute(object v1.Object, route *routev1.Route, action resources.Action) error { + instance := object.(*current.IBPPeer) + switch action { + case resources.Create: + return o.CreatePeerGRPCRouteOverride(instance, route) + case resources.Update: + return o.UpdatePeerGRPCRouteOverride(instance, route) + } + + return nil +} + +func (o *Override) CreatePeerGRPCRouteOverride(instance *current.IBPPeer, route *routev1.Route) error { + route.Name = fmt.Sprintf("%s-grpcweb", instance.GetName()) + route.Spec.Host = instance.Namespace + "-" + instance.GetName() + "-grpcweb" + "." + instance.Spec.Domain + weight := int32(100) + route.Spec.To = routev1.RouteTargetReference{ + Kind: "Service", + Name: service.GetName(instance.Name), + Weight: &weight, + } + + route.Spec.Port = &routev1.RoutePort{ + TargetPort: intstr.FromString("grpcweb"), + } + + route.Spec.TLS = &routev1.TLSConfig{ + Termination: "passthrough", + } + + return nil +} + +func (o *Override) UpdatePeerGRPCRouteOverride(instance *current.IBPPeer, route *routev1.Route) error { + return nil +} diff --git a/pkg/offering/openshift/peer/override/operationroute.go b/pkg/offering/openshift/peer/override/operationroute.go new file mode 100644 index 00000000..5db8420f --- /dev/null +++ b/pkg/offering/openshift/peer/override/operationroute.go @@ -0,0 +1,67 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package override + +import ( + "fmt" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/service" + routev1 "github.com/openshift/api/route/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func (o *Override) OperationsRoute(object v1.Object, route *routev1.Route, action resources.Action) error { + instance := object.(*current.IBPPeer) + switch action { + case resources.Create: + return o.CreateOperationsRouteOverride(instance, route) + case resources.Update: + return o.UpdateOperationsRouteOverride(instance, route) + } + + return nil +} + +func (o *Override) CreateOperationsRouteOverride(instance *current.IBPPeer, route *routev1.Route) error { + route.Name = fmt.Sprintf("%s-operations", instance.GetName()) + route.Spec.Host = instance.Namespace + "-" + instance.GetName() + "-operations" + "." + instance.Spec.Domain + weight := int32(100) + route.Spec.To = routev1.RouteTargetReference{ + Kind: "Service", + Name: service.GetName(instance.Name), + Weight: &weight, + } + + route.Spec.Port = &routev1.RoutePort{ + TargetPort: intstr.FromString("operations"), + } + + route.Spec.TLS = &routev1.TLSConfig{ + Termination: routev1.TLSTerminationPassthrough, + } + + return nil +} + +func (o *Override) UpdateOperationsRouteOverride(instance *current.IBPPeer, route *routev1.Route) error { + return nil +} diff --git a/pkg/offering/openshift/peer/override/override.go b/pkg/offering/openshift/peer/override/override.go new file mode 100644 index 00000000..a9147ca3 --- /dev/null +++ b/pkg/offering/openshift/peer/override/override.go @@ -0,0 +1,27 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override + +import ( + basepeer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/peer/override" +) + +type Override struct { + basepeer.Override +} diff --git a/pkg/offering/openshift/peer/override/override_suite_test.go b/pkg/offering/openshift/peer/override/override_suite_test.go new file mode 100644 index 00000000..fa47c9b8 --- /dev/null +++ b/pkg/offering/openshift/peer/override/override_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package override_test
+
+import (
+	"testing"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+)
+
+func TestOverride(t *testing.T) {
+	RegisterFailHandler(Fail)
+	RunSpecs(t, "Override Suite")
+}
diff --git a/pkg/offering/openshift/peer/override/override_test.go b/pkg/offering/openshift/peer/override/override_test.go
new file mode 100644
index 00000000..3e72b053
--- /dev/null
+++ b/pkg/offering/openshift/peer/override/override_test.go
@@ -0,0 +1,104 @@
+/*
+ * Copyright contributors to the Hyperledger Fabric Operator project
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package override_test
+
+import (
+	"fmt"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	"k8s.io/apimachinery/pkg/util/intstr"
+
+	current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1"
+	"github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources"
+	"github.com/IBM-Blockchain/fabric-operator/pkg/offering/openshift/peer/override"
+	routev1 "github.com/openshift/api/route/v1"
+)
+
+var _ = Describe("Openshift Peer Overrides", func() {
+	var (
+		route     *routev1.Route
+		overrider *override.Override
+		instance  *current.IBPPeer
+	)
+
+	BeforeEach(func() {
+		route = &routev1.Route{}
+		overrider = &override.Override{}
+
+		instance = &current.IBPPeer{
+			Spec: current.IBPPeerSpec{
+				Domain: "test-domain",
+			},
+		}
+		instance.Name = "route1"
+		instance.Namespace = "testNS"
+	})
+
+	Context("Peer Route", func() {
+		When("creating a new Peer Route", func() {
+			It("appropriately overrides the respective values", func() {
+				err := overrider.PeerRoute(instance, route, resources.Create)
+				Expect(err).NotTo(HaveOccurred())
+
+				Expect(route.Name).To(Equal(fmt.Sprintf("%s-peer", instance.Name)))
+				Expect(route.Spec.Host).To(Equal("testNS-route1-peer.test-domain"))
+				Expect(route.Spec.To.Kind).To(Equal("Service"))
+				Expect(route.Spec.To.Name).To(Equal(instance.Name))
+				Expect(*route.Spec.To.Weight).To(Equal(int32(100)))
+				Expect(route.Spec.Port.TargetPort).To(Equal(intstr.FromString("peer-api")))
+				Expect(route.Spec.TLS.Termination).To(Equal(routev1.TLSTerminationPassthrough))
+			})
+		})
+	})
+
+	Context("Operation Route", func() {
+		When("creating a new Operation Route", func() {
+			It("appropriately overrides the respective values", func() {
+				err := overrider.OperationsRoute(instance, route, resources.Create)
+				Expect(err).NotTo(HaveOccurred())
+
+				Expect(route.Name).To(Equal(fmt.Sprintf("%s-operations", instance.Name)))
+				Expect(route.Spec.Host).To(Equal("testNS-route1-operations.test-domain"))
+				Expect(route.Spec.To.Kind).To(Equal("Service"))
+				Expect(route.Spec.To.Name).To(Equal(instance.Name))
+				Expect(*route.Spec.To.Weight).To(Equal(int32(100)))
+				Expect(route.Spec.Port.TargetPort).To(Equal(intstr.FromString("operations")))
+				Expect(route.Spec.TLS.Termination).To(Equal(routev1.TLSTerminationPassthrough))
+			})
+		})
+	})
+
+
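+	// NOTE: illustrative sketch only, not part of the original suite. It relies on
+	// the fact that the Update overrides in this package (for example
+	// UpdateOperationsRouteOverride) are currently no-ops, so reconciling an
+	// existing Route with resources.Update is expected to leave it untouched.
+	Context("Updating an existing Route (sketch)", func() {
+		It("leaves the Route unmodified", func() {
+			err := overrider.OperationsRoute(instance, route, resources.Update)
+			Expect(err).NotTo(HaveOccurred())
+			Expect(route.Name).To(Equal(""))
+			Expect(route.Spec.Host).To(Equal(""))
+		})
+	})
+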
Context("GPRC Route", func() { + When("creating a new GRPC Route", func() { + It("appropriately overrides the respective values", func() { + err := overrider.PeerGRPCRoute(instance, route, resources.Create) + Expect(err).NotTo(HaveOccurred()) + + Expect(route.Name).To(Equal(fmt.Sprintf("%s-grpcweb", instance.Name))) + Expect(route.Spec.Host).To(Equal("testNS-route1-grpcweb.test-domain")) + Expect(route.Spec.To.Kind).To(Equal("Service")) + Expect(route.Spec.To.Name).To(Equal(instance.Name)) + Expect(*route.Spec.To.Weight).To(Equal(int32(100))) + Expect(route.Spec.Port.TargetPort).To(Equal(intstr.FromString("grpcweb"))) + Expect(route.Spec.TLS.Termination).To(Equal(routev1.TLSTerminationPassthrough)) + }) + }) + }) +}) diff --git a/pkg/offering/openshift/peer/override/peerroute.go b/pkg/offering/openshift/peer/override/peerroute.go new file mode 100644 index 00000000..cc071346 --- /dev/null +++ b/pkg/offering/openshift/peer/override/peerroute.go @@ -0,0 +1,67 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package override + +import ( + "fmt" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/service" + routev1 "github.com/openshift/api/route/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func (o *Override) PeerRoute(object v1.Object, route *routev1.Route, action resources.Action) error { + instance := object.(*current.IBPPeer) + switch action { + case resources.Create: + return o.CreatePeerRouteOverride(instance, route) + case resources.Update: + return o.UpdatePeerRouteOverride(instance, route) + } + + return nil +} + +func (o *Override) CreatePeerRouteOverride(instance *current.IBPPeer, route *routev1.Route) error { + route.Name = fmt.Sprintf("%s-peer", instance.GetName()) + route.Spec.Host = instance.Namespace + "-" + instance.GetName() + "-peer" + "." 
+ instance.Spec.Domain + weight := int32(100) + route.Spec.To = routev1.RouteTargetReference{ + Kind: "Service", + Name: service.GetName(instance.Name), + Weight: &weight, + } + + route.Spec.Port = &routev1.RoutePort{ + TargetPort: intstr.FromString("peer-api"), + } + + route.Spec.TLS = &routev1.TLSConfig{ + Termination: routev1.TLSTerminationPassthrough, + } + + return nil +} + +func (o *Override) UpdatePeerRouteOverride(instance *current.IBPPeer, route *routev1.Route) error { + return nil +} diff --git a/pkg/offering/openshift/peer/peer.go b/pkg/offering/openshift/peer/peer.go new file mode 100644 index 00000000..db87625d --- /dev/null +++ b/pkg/offering/openshift/peer/peer.go @@ -0,0 +1,315 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package openshiftpeer + +import ( + "context" + "fmt" + "regexp" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + commoninit "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common" + controllerclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" + resourcemanager "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/manager" + basepeer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/peer" + basepeeroverride "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/peer/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/openshift/peer/override" + "github.com/IBM-Blockchain/fabric-operator/pkg/operatorerrors" + "github.com/IBM-Blockchain/fabric-operator/version" + openshiftv1 "github.com/openshift/api/config/v1" + routev1 "github.com/openshift/api/route/v1" + "github.com/pkg/errors" + "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + k8sclient "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var log = logf.Log.WithName("openshift_peer") + +type Override interface { + basepeer.Override + PeerRoute(object v1.Object, route *routev1.Route, action resources.Action) error + OperationsRoute(object v1.Object, route *routev1.Route, action resources.Action) error + PeerGRPCRoute(object v1.Object, route *routev1.Route, action resources.Action) error +} + +var _ basepeer.IBPPeer = &Peer{} + +type Peer struct { + *basepeer.Peer + + RouteManager resources.Manager + OperationsRouteManager resources.Manager + GRPCRouteManager resources.Manager + RestClient *clientset.Clientset + + Override Override +} + +func New(client controllerclient.Client, scheme *runtime.Scheme, config *config.Config, restclient *clientset.Clientset) *Peer { + o := 
&override.Override{ + Override: basepeeroverride.Override{ + Client: client, + DefaultCouchContainerFile: config.PeerInitConfig.CouchContainerFile, + DefaultCouchInitContainerFile: config.PeerInitConfig.CouchInitContainerFile, + DefaultCCLauncherFile: config.PeerInitConfig.CCLauncherFile, + }, + } + + peer := &Peer{ + Peer: basepeer.New(client, scheme, config, o), + Override: o, + RestClient: restclient, + } + + peer.CreateManagers() + return peer +} + +func (p *Peer) CreateManagers() { + resourceManager := resourcemanager.New(p.Client, p.Scheme) + p.RouteManager = resourceManager.CreateRouteManager("peer", p.Override.PeerRoute, p.GetLabels, p.Config.PeerInitConfig.RouteFile) + p.OperationsRouteManager = resourceManager.CreateRouteManager("operations", p.Override.OperationsRoute, p.GetLabels, p.Config.PeerInitConfig.RouteFile) + p.GRPCRouteManager = resourceManager.CreateRouteManager("grpcweb", p.Override.PeerGRPCRoute, p.GetLabels, p.Config.PeerInitConfig.RouteFile) +} + +func (p *Peer) ReconcileManagers(instance *current.IBPPeer, update basepeer.Update) error { + err := p.Peer.ReconcileManagers(instance, update) + if err != nil { + return err + } + + err = p.RouteManager.Reconcile(instance, update.SpecUpdated()) + if err != nil { + return errors.Wrap(err, "failed Peer Route reconciliation") + } + + err = p.OperationsRouteManager.Reconcile(instance, update.SpecUpdated()) + if err != nil { + return errors.Wrap(err, "failed Operations Route reconciliation") + } + + err = p.GRPCRouteManager.Reconcile(instance, update.SpecUpdated()) + if err != nil { + return errors.Wrap(err, "failed Peer GRPC Route reconciliation") + } + + return nil +} + +func (p *Peer) Reconcile(instance *current.IBPPeer, update basepeer.Update) (common.Result, error) { + var err error + var status *current.CRStatus + + versionSet, err := p.SetVersion(instance) + if err != nil { + return common.Result{}, errors.Wrap(err, fmt.Sprintf("failed updating CR '%s' to version '%s'", instance.Name, version.Operator)) + } + if versionSet { + log.Info("Instance version updated, requeuing request...") + return common.Result{ + Result: reconcile.Result{ + Requeue: true, + }, + }, nil + } + + updatecr, err := p.SelectDinDArgs(instance) + if err != nil { + log.Info("Cannot get cluster version. 
Ignoring openshift cluster version")
+	}
+
+	update.SetDindArgsUpdated(updatecr)
+	instanceUpdated, err := p.PreReconcileChecks(instance, update)
+	if err != nil {
+		return common.Result{}, errors.Wrap(err, "failed pre reconcile checks")
+	}
+
+	// We do not have to wait for service to get the external endpoint
+	// thus we call UpdateExternalEndpoint in reconcile before reconcile managers
+	externalEndpointUpdated := p.UpdateExternalEndpoint(instance)
+
+	hostAPI := fmt.Sprintf("%s-%s-peer.%s", instance.Namespace, instance.Name, instance.Spec.Domain)
+	hostOperations := fmt.Sprintf("%s-%s-operations.%s", instance.Namespace, instance.Name, instance.Spec.Domain)
+	hostGrpc := fmt.Sprintf("%s-%s-grpcweb.%s", instance.Namespace, instance.Name, instance.Spec.Domain)
+	hosts := []string{hostAPI, hostOperations, hostGrpc, "127.0.0.1"}
+	csrHostUpdated := p.CheckCSRHosts(instance, hosts)
+
+	if instanceUpdated || externalEndpointUpdated || csrHostUpdated {
+		log.Info(fmt.Sprintf("Updating instance after pre reconcile checks: %t, updating external endpoint: %t, csrhost Updated: %t", instanceUpdated, externalEndpointUpdated, csrHostUpdated))
+		err := p.Client.Patch(context.TODO(), instance, nil, controllerclient.PatchOption{
+			Resilient: &controllerclient.ResilientPatch{
+				Retry:    3,
+				Into:     &current.IBPPeer{},
+				Strategy: k8sclient.MergeFrom,
+			},
+		})
+		if err != nil {
+			return common.Result{}, errors.Wrap(err, "failed to update instance after prereconcile checks")
+		}
+
+		log.Info("Instance updated, requeuing request...")
+		return common.Result{
+			Result: reconcile.Result{
+				Requeue: true,
+			},
+		}, nil
+	}
+
+	jobRunning, err := p.HandleMigrationJobs(k8sclient.MatchingLabels{
+		"owner":    instance.GetName(),
+		"job-name": fmt.Sprintf("%s-dbmigration", instance.GetName()),
+	}, instance)
+	if jobRunning {
+		log.Info("Requeuing request until job completes")
+		return common.Result{
+			Result: reconcile.Result{
+				Requeue: true,
+			},
+		}, nil
+	}
+	if err != nil {
+		return common.Result{}, err
+	}
+
+	err = p.Initialize(instance, update)
+	if err != nil {
+		return common.Result{}, operatorerrors.Wrap(err, operatorerrors.PeerInitilizationFailed, "failed to initialize peer")
+	}
+
+	if update.PeerTagUpdated() {
+		if err := p.ReconcileFabricPeerMigrationV1_4(instance); err != nil {
+			return common.Result{}, operatorerrors.Wrap(err, operatorerrors.FabricPeerMigrationFailed, "failed to migrate fabric peer versions")
+		}
+	}
+
+	if update.MigrateToV2() {
+		if err := p.ReconcileFabricPeerMigrationV2_0(instance); err != nil {
+			return common.Result{}, operatorerrors.Wrap(err, operatorerrors.FabricPeerMigrationFailed, "failed to migrate fabric peer to version v2.0.x")
+		}
+	}
+
+	if update.MigrateToV24() {
+		if err := p.ReconcileFabricPeerMigrationV2_4(instance); err != nil {
+			return common.Result{}, operatorerrors.Wrap(err, operatorerrors.FabricPeerMigrationFailed, "failed to migrate fabric peer to version v2.4.x")
+		}
+	}
+
+	err = p.ReconcileManagers(instance, update)
+	if err != nil {
+		return common.Result{}, errors.Wrap(err, "failed to reconcile managers")
+	}
+
+	err = p.UpdateConnectionProfile(instance)
+	if err != nil {
+		return common.Result{}, errors.Wrap(err, "failed to create connection profile")
+	}
+
+	err = p.CheckStates(instance)
+	if err != nil {
+		return common.Result{}, errors.Wrap(err, "failed to check and restore state")
+	}
+
+	// custom product logic can be implemented here
+	// No-Op atm
+	status, result, err := p.CustomLogic(instance, update)
+	if err != nil {
+		return common.Result{}, errors.Wrap(err, "failed to run custom offering logic")
+	}
+	if result != nil {
+		log.Info(fmt.Sprintf("Finished reconciling '%s' with Custom Logic result", instance.GetName()))
+		return *result, nil
+	}
+
+	if update.EcertUpdated() {
+		log.Info("Ecert was updated")
+		// Request deployment restart for ecert update
+		err = p.Restart.ForCertUpdate(commoninit.ECERT, instance)
+		if err != nil {
+			return common.Result{}, errors.Wrap(err, "failed to update restart config")
+		}
+	}
+
+	if update.TLSCertUpdated() {
+		log.Info("TLS cert was updated")
+		// Request deployment restart for TLS cert update
+		err = p.Restart.ForCertUpdate(commoninit.TLS, instance)
+		if err != nil {
+			return common.Result{}, errors.Wrap(err, "failed to update restart config")
+		}
+	}
+
+	if update.MSPUpdated() {
+		err = p.UpdateMSPCertificates(instance)
+		if err != nil {
+			return common.Result{}, errors.Wrap(err, "failed to update certificates passed in MSP spec")
+		}
+	}
+
+	if err := p.HandleActions(instance, update); err != nil {
+		return common.Result{}, err
+	}
+
+	// If configs were updated during initialization, the pods need to be restarted to pick up the
+	// new config changes. This should be done as the last step, specifically after ReconcileManagers,
+	// to allow any updates to the deployment to be completed before restarting.
+	// Trigger the deployment restart by deleting the deployment.
+	if err := p.HandleRestart(instance, update); err != nil {
+		return common.Result{}, err
+	}
+
+	return common.Result{
+		Status: status,
+	}, nil
+}
+
+func (p *Peer) SelectDinDArgs(instance *current.IBPPeer) (bool, error) {
+
+	if len(instance.Spec.DindArgs) != 0 {
+		return false, nil
+	}
+
+	clusterversion := openshiftv1.ClusterVersion{}
+
+	err := p.RestClient.RESTClient().Get().
+		AbsPath("apis", "config.openshift.io", "v1", "clusterversions", "version").
+		Do(context.TODO()).
+		Into(&clusterversion)
+
+	if err != nil {
+		return false, err
+	}
+
+	dindargs := []string{"--log-driver", "fluentd", "--log-opt", "fluentd-address=localhost:9880", "--mtu", "1400", "--iptables=true"}
+
+	re := regexp.MustCompile(`4\.[0-9]\.[0-9]`)
+	if re.MatchString(clusterversion.Status.Desired.Version) {
+		dindargs = []string{"--log-driver", "fluentd", "--log-opt", "fluentd-address=localhost:9880", "--mtu", "1400", "--iptables=false"}
+	}
+
+	instance.Spec.DindArgs = dindargs
+
+	return true, nil
+}
diff --git a/pkg/offering/openshift/peer/peer_suite_test.go b/pkg/offering/openshift/peer/peer_suite_test.go
new file mode 100644
index 00000000..2d60b312
--- /dev/null
+++ b/pkg/offering/openshift/peer/peer_suite_test.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright contributors to the Hyperledger Fabric Operator project
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package openshiftpeer_test
+
+import (
+	"testing"
+
+	. "github.com/onsi/ginkgo"
+	.
"github.com/onsi/gomega" +) + +func TestPeer(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Peer Suite") +} diff --git a/pkg/offering/openshift/peer/peer_test.go b/pkg/offering/openshift/peer/peer_test.go new file mode 100644 index 00000000..ba2768ff --- /dev/null +++ b/pkg/offering/openshift/peer/peer_test.go @@ -0,0 +1,221 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package openshiftpeer_test + +import ( + "context" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + cmocks "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/enroller" + peerinit "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer" + managermocks "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/mocks" + basepeer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/peer" + "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/peer/mocks" + peermocks "github.com/IBM-Blockchain/fabric-operator/pkg/offering/base/peer/mocks" + openshiftpeer "github.com/IBM-Blockchain/fabric-operator/pkg/offering/openshift/peer" + "github.com/IBM-Blockchain/fabric-operator/pkg/operatorerrors" + "github.com/IBM-Blockchain/fabric-operator/version" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = Describe("Openshift Peer", func() { + var ( + peer *openshiftpeer.Peer + instance *current.IBPPeer + mockKubeClient *cmocks.Client + cfg *config.Config + + deploymentMgr *peermocks.DeploymentManager + peerRouteManager *managermocks.ResourceManager + operationsRouteManager *managermocks.ResourceManager + grpcRouteManager *managermocks.ResourceManager + update *mocks.Update + ) + + Context("Reconciles", func() { + BeforeEach(func() { + mockKubeClient = &cmocks.Client{} + update = &mocks.Update{} + + replicas := int32(1) + instance = ¤t.IBPPeer{ + TypeMeta: metav1.TypeMeta{ + Kind: "IBPPeer", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "peer1", + Namespace: "random", + }, + Spec: current.IBPPeerSpec{ + PeerExternalEndpoint: "address", + Domain: "domain", + DindArgs: []string{"fake", "args"}, + StateDb: "couchdb", + Replicas: &replicas, + Images: ¤t.PeerImages{}, + FabricVersion: "1.4.9", + }, + Status: current.IBPPeerStatus{ + CRStatus: current.CRStatus{ + Version: version.Operator, + }, + }, + } + + mockKubeClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj client.Object) error { + switch obj.(type) { + case *corev1.Secret: + o := obj.(*corev1.Secret) + switch types.Name { + case "tls-" + instance.Name + "-signcert": + o.Name = "tls-" + instance.Name + "-signcert" + o.Namespace = instance.Namespace + o.Data = map[string][]byte{"cert.pem": []byte("LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNwVENDQWtxZ0F3SUJBZ0lSQU1FeVZVcDRMdlYydEFUREhlWklldDh3Q2dZSUtvWkl6ajBFQXdJd2daVXgKQ3pBSkJnTlZCQVlUQWxWVE1SY3dGUVlEVlFRSUV3NU9iM0owYUNCRFlYSnZiR2x1WVRFUE1BMEdBMVVFQnhNRwpSSFZ5YUdGdE1Rd3dDZ1lEVlFRS0V3TkpRazB4RXpBUkJnTlZCQXNUQ2tKc2IyTnJZMmhoYVc0eE9UQTNCZ05WCkJBTVRNR3BoYmpJeUxXOXlaR1Z5WlhKdmNtZGpZUzFqWVM1aGNIQnpMbkIxYldGekxtOXpMbVo1Y21VdWFXSnQKTG1OdmJUQWVGdzB5TURBeE1qSXhPREExTURCYUZ3MHpNREF4TVRreE9EQTFNREJhTUlHVk1Rc3dDUVlEVlFRRwpFd0pWVXpFWE1CVUdBMVVFQ0JNT1RtOXlkR2dnUTJGeWIyeHBibUV4RHpBTkJnTlZCQWNUQmtSMWNtaGhiVEVNCk1Bb0dBMVVFQ2hNRFNVSk5NUk13RVFZRFZRUUxFd3BDYkc5amEyTm9ZV2x1TVRrd053WURWUVFERXpCcVlXNHkKTWkxdmNtUmxjbVZ5YjNKblkyRXRZMkV1WVhCd2N5NXdkVzFoY3k1dmN5NW1lWEpsTG1saWJTNWpiMjB3V1RBVApCZ2NxaGtqT1BRSUJCZ2dxaGtqT1BRTUJCd05DQUFTR0lHUFkvZC9tQVhMejM4SlROR3F5bldpOTJXUVB6cnN0Cm5vdEFWZlh0dHZ5QWJXdTRNbWNUMEh6UnBTWjNDcGdxYUNXcTg1MUwyV09LcnZ6L0JPREpvM2t3ZHpCMUJnTlYKSFJFRWJqQnNnakJxWVc0eU1pMXZjbVJsY21WeWIzSm5ZMkV0WTJFdVlYQndjeTV3ZFcxaGN5NXZjeTVtZVhKbApMbWxpYlM1amIyMkNPR3BoYmpJeUxXOXlaR1Z5WlhKdmNtZGpZUzF2Y0dWeVlYUnBiMjV6TG1Gd2NITXVjSFZ0CllYTXViM011Wm5seVpTNXBZbTB1WTI5dE1Bb0dDQ3FHU000OUJBTUNBMGtBTUVZQ0lRQzM3Y1pkNFY2RThPQ1IKaDloQXEyK0dyR21FVTFQU0I1eHo5RkdEWThkODZRSWhBT1crM3Urb2d4bFNWNUoyR3ZYbHRaQmpXRkpvYnJxeApwVVQ4cW4yMDA1b0wKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo")} + case "tls-" + instance.Name + "-cacerts": + o.Name = "tls-" + instance.Name + "-cacerts" + o.Namespace = instance.Namespace + o.Data = map[string][]byte{"cert.pem": 
[]byte("LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNwVENDQWtxZ0F3SUJBZ0lSQU1FeVZVcDRMdlYydEFUREhlWklldDh3Q2dZSUtvWkl6ajBFQXdJd2daVXgKQ3pBSkJnTlZCQVlUQWxWVE1SY3dGUVlEVlFRSUV3NU9iM0owYUNCRFlYSnZiR2x1WVRFUE1BMEdBMVVFQnhNRwpSSFZ5YUdGdE1Rd3dDZ1lEVlFRS0V3TkpRazB4RXpBUkJnTlZCQXNUQ2tKc2IyTnJZMmhoYVc0eE9UQTNCZ05WCkJBTVRNR3BoYmpJeUxXOXlaR1Z5WlhKdmNtZGpZUzFqWVM1aGNIQnpMbkIxYldGekxtOXpMbVo1Y21VdWFXSnQKTG1OdmJUQWVGdzB5TURBeE1qSXhPREExTURCYUZ3MHpNREF4TVRreE9EQTFNREJhTUlHVk1Rc3dDUVlEVlFRRwpFd0pWVXpFWE1CVUdBMVVFQ0JNT1RtOXlkR2dnUTJGeWIyeHBibUV4RHpBTkJnTlZCQWNUQmtSMWNtaGhiVEVNCk1Bb0dBMVVFQ2hNRFNVSk5NUk13RVFZRFZRUUxFd3BDYkc5amEyTm9ZV2x1TVRrd053WURWUVFERXpCcVlXNHkKTWkxdmNtUmxjbVZ5YjNKblkyRXRZMkV1WVhCd2N5NXdkVzFoY3k1dmN5NW1lWEpsTG1saWJTNWpiMjB3V1RBVApCZ2NxaGtqT1BRSUJCZ2dxaGtqT1BRTUJCd05DQUFTR0lHUFkvZC9tQVhMejM4SlROR3F5bldpOTJXUVB6cnN0Cm5vdEFWZlh0dHZ5QWJXdTRNbWNUMEh6UnBTWjNDcGdxYUNXcTg1MUwyV09LcnZ6L0JPREpvM2t3ZHpCMUJnTlYKSFJFRWJqQnNnakJxWVc0eU1pMXZjbVJsY21WeWIzSm5ZMkV0WTJFdVlYQndjeTV3ZFcxaGN5NXZjeTVtZVhKbApMbWxpYlM1amIyMkNPR3BoYmpJeUxXOXlaR1Z5WlhKdmNtZGpZUzF2Y0dWeVlYUnBiMjV6TG1Gd2NITXVjSFZ0CllYTXViM011Wm5seVpTNXBZbTB1WTI5dE1Bb0dDQ3FHU000OUJBTUNBMGtBTUVZQ0lRQzM3Y1pkNFY2RThPQ1IKaDloQXEyK0dyR21FVTFQU0I1eHo5RkdEWThkODZRSWhBT1crM3Urb2d4bFNWNUoyR3ZYbHRaQmpXRkpvYnJxeApwVVQ4cW4yMDA1b0wKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo")} + case "ecert-" + instance.Name + "-signcert": + o.Name = "ecert-" + instance.Name + "-signcert" + o.Namespace = instance.Namespace + o.Data = map[string][]byte{"cert.pem": []byte("LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNwVENDQWtxZ0F3SUJBZ0lSQU1FeVZVcDRMdlYydEFUREhlWklldDh3Q2dZSUtvWkl6ajBFQXdJd2daVXgKQ3pBSkJnTlZCQVlUQWxWVE1SY3dGUVlEVlFRSUV3NU9iM0owYUNCRFlYSnZiR2x1WVRFUE1BMEdBMVVFQnhNRwpSSFZ5YUdGdE1Rd3dDZ1lEVlFRS0V3TkpRazB4RXpBUkJnTlZCQXNUQ2tKc2IyTnJZMmhoYVc0eE9UQTNCZ05WCkJBTVRNR3BoYmpJeUxXOXlaR1Z5WlhKdmNtZGpZUzFqWVM1aGNIQnpMbkIxYldGekxtOXpMbVo1Y21VdWFXSnQKTG1OdmJUQWVGdzB5TURBeE1qSXhPREExTURCYUZ3MHpNREF4TVRreE9EQTFNREJhTUlHVk1Rc3dDUVlEVlFRRwpFd0pWVXpFWE1CVUdBMVVFQ0JNT1RtOXlkR2dnUTJGeWIyeHBibUV4RHpBTkJnTlZCQWNUQmtSMWNtaGhiVEVNCk1Bb0dBMVVFQ2hNRFNVSk5NUk13RVFZRFZRUUxFd3BDYkc5amEyTm9ZV2x1TVRrd053WURWUVFERXpCcVlXNHkKTWkxdmNtUmxjbVZ5YjNKblkyRXRZMkV1WVhCd2N5NXdkVzFoY3k1dmN5NW1lWEpsTG1saWJTNWpiMjB3V1RBVApCZ2NxaGtqT1BRSUJCZ2dxaGtqT1BRTUJCd05DQUFTR0lHUFkvZC9tQVhMejM4SlROR3F5bldpOTJXUVB6cnN0Cm5vdEFWZlh0dHZ5QWJXdTRNbWNUMEh6UnBTWjNDcGdxYUNXcTg1MUwyV09LcnZ6L0JPREpvM2t3ZHpCMUJnTlYKSFJFRWJqQnNnakJxWVc0eU1pMXZjbVJsY21WeWIzSm5ZMkV0WTJFdVlYQndjeTV3ZFcxaGN5NXZjeTVtZVhKbApMbWxpYlM1amIyMkNPR3BoYmpJeUxXOXlaR1Z5WlhKdmNtZGpZUzF2Y0dWeVlYUnBiMjV6TG1Gd2NITXVjSFZ0CllYTXViM011Wm5seVpTNXBZbTB1WTI5dE1Bb0dDQ3FHU000OUJBTUNBMGtBTUVZQ0lRQzM3Y1pkNFY2RThPQ1IKaDloQXEyK0dyR21FVTFQU0I1eHo5RkdEWThkODZRSWhBT1crM3Urb2d4bFNWNUoyR3ZYbHRaQmpXRkpvYnJxeApwVVQ4cW4yMDA1b0wKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo")} + case "ecert-" + instance.Name + "-cacerts": + o.Name = "ecert-" + instance.Name + "-cacerts" + o.Namespace = instance.Namespace + o.Data = map[string][]byte{"cacert-0.pem": 
[]byte("LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNwVENDQWtxZ0F3SUJBZ0lSQU1FeVZVcDRMdlYydEFUREhlWklldDh3Q2dZSUtvWkl6ajBFQXdJd2daVXgKQ3pBSkJnTlZCQVlUQWxWVE1SY3dGUVlEVlFRSUV3NU9iM0owYUNCRFlYSnZiR2x1WVRFUE1BMEdBMVVFQnhNRwpSSFZ5YUdGdE1Rd3dDZ1lEVlFRS0V3TkpRazB4RXpBUkJnTlZCQXNUQ2tKc2IyTnJZMmhoYVc0eE9UQTNCZ05WCkJBTVRNR3BoYmpJeUxXOXlaR1Z5WlhKdmNtZGpZUzFqWVM1aGNIQnpMbkIxYldGekxtOXpMbVo1Y21VdWFXSnQKTG1OdmJUQWVGdzB5TURBeE1qSXhPREExTURCYUZ3MHpNREF4TVRreE9EQTFNREJhTUlHVk1Rc3dDUVlEVlFRRwpFd0pWVXpFWE1CVUdBMVVFQ0JNT1RtOXlkR2dnUTJGeWIyeHBibUV4RHpBTkJnTlZCQWNUQmtSMWNtaGhiVEVNCk1Bb0dBMVVFQ2hNRFNVSk5NUk13RVFZRFZRUUxFd3BDYkc5amEyTm9ZV2x1TVRrd053WURWUVFERXpCcVlXNHkKTWkxdmNtUmxjbVZ5YjNKblkyRXRZMkV1WVhCd2N5NXdkVzFoY3k1dmN5NW1lWEpsTG1saWJTNWpiMjB3V1RBVApCZ2NxaGtqT1BRSUJCZ2dxaGtqT1BRTUJCd05DQUFTR0lHUFkvZC9tQVhMejM4SlROR3F5bldpOTJXUVB6cnN0Cm5vdEFWZlh0dHZ5QWJXdTRNbWNUMEh6UnBTWjNDcGdxYUNXcTg1MUwyV09LcnZ6L0JPREpvM2t3ZHpCMUJnTlYKSFJFRWJqQnNnakJxWVc0eU1pMXZjbVJsY21WeWIzSm5ZMkV0WTJFdVlYQndjeTV3ZFcxaGN5NXZjeTVtZVhKbApMbWxpYlM1amIyMkNPR3BoYmpJeUxXOXlaR1Z5WlhKdmNtZGpZUzF2Y0dWeVlYUnBiMjV6TG1Gd2NITXVjSFZ0CllYTXViM011Wm5seVpTNXBZbTB1WTI5dE1Bb0dDQ3FHU000OUJBTUNBMGtBTUVZQ0lRQzM3Y1pkNFY2RThPQ1IKaDloQXEyK0dyR21FVTFQU0I1eHo5RkdEWThkODZRSWhBT1crM3Urb2d4bFNWNUoyR3ZYbHRaQmpXRkpvYnJxeApwVVQ4cW4yMDA1b0wKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo")} + } + } + return nil + } + + deploymentMgr = &peermocks.DeploymentManager{} + serviceMgr := &managermocks.ResourceManager{} + pvcMgr := &managermocks.ResourceManager{} + couchPvcMgr := &managermocks.ResourceManager{} + configMapMgr := &managermocks.ResourceManager{} + roleMgr := &managermocks.ResourceManager{} + roleBindingMgr := &managermocks.ResourceManager{} + serviceAccountMgr := &managermocks.ResourceManager{} + certificateMgr := &peermocks.CertificateManager{} + restartMgr := &peermocks.RestartManager{} + + peerRouteManager = &managermocks.ResourceManager{} + operationsRouteManager = &managermocks.ResourceManager{} + grpcRouteManager = &managermocks.ResourceManager{} + + scheme := &runtime.Scheme{} + cfg = &config.Config{ + PeerInitConfig: &peerinit.Config{ + OUFile: "../../../../defaultconfig/peer/ouconfig.yaml", + CorePeerFile: "../../../../defaultconfig/peer/core.yaml", + }, + } + initializer := &peermocks.InitializeIBPPeer{} + initializer.GetInitPeerReturns(&peerinit.Peer{}, nil) + peer = &openshiftpeer.Peer{ + Peer: &basepeer.Peer{ + Config: cfg, + Client: mockKubeClient, + Scheme: scheme, + DeploymentManager: deploymentMgr, + ServiceManager: serviceMgr, + PVCManager: pvcMgr, + StateDBPVCManager: couchPvcMgr, + FluentDConfigMapManager: configMapMgr, + RoleManager: roleMgr, + RoleBindingManager: roleBindingMgr, + ServiceAccountManager: serviceAccountMgr, + Initializer: initializer, + CertificateManager: certificateMgr, + Restart: restartMgr, + }, + RouteManager: peerRouteManager, + OperationsRouteManager: operationsRouteManager, + GRPCRouteManager: grpcRouteManager, + } + }) + + It("returns an error if peer route manager fails to reconcile", func() { + peerRouteManager.ReconcileReturns(errors.New("failed to reconcile peer route")) + _, err := peer.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed Peer Route reconciliation: failed to reconcile peer route")) + }) + + It("returns an error if operations route manager fails to reconcile", func() { + operationsRouteManager.ReconcileReturns(errors.New("failed to reconcile operations route")) + _, err := peer.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) 
+ Expect(err.Error()).To(Equal("failed to reconcile managers: failed Operations Route reconciliation: failed to reconcile operations route")) + }) + + It("returns an error if grpc web route manager fails to reconcile", func() { + grpcRouteManager.ReconcileReturns(errors.New("failed to reconcile grpc web route")) + _, err := peer.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to reconcile managers: failed Peer GRPC Route reconciliation: failed to reconcile grpc web route")) + }) + + // Disabling this test because the function uses rest client which cannot be mocked + // It("adds dind args in CR if not passed", func() { + // mockKubeClient.GetStub = func(ctx context.Context, types types.NamespacedName, obj client.Object) error { + // switch obj.(type) { + // case *openshiftv1.ClusterVersion: + // cv := &openshiftv1.ClusterVersion{ + // Spec: openshiftv1.ClusterVersionSpec{ + // Channel: "stable-4.2", + // }, + // } + + // obj = cv.DeepCopy() + // } + + // return nil + + // } + // _, err := peer.SelectDinDArgs(instance) + // Expect(err).NotTo(HaveOccurred()) + + // Expect(len(instance.Spec.DindArgs)).NotTo(Equal(0)) + // }) + + It("returns a breaking error if initialization fails", func() { + cfg.PeerInitConfig.CorePeerFile = "../../../../defaultconfig/peer/badfile.yaml" + peer.Initializer = peerinit.New(cfg.PeerInitConfig, nil, nil, nil, nil, enroller.HSMEnrollJobTimeouts{}) + _, err := peer.Reconcile(instance, update) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("Code: 22 - failed to initialize peer: open")) + Expect(operatorerrors.IsBreakingError(err, "msg", nil)).NotTo(HaveOccurred()) + }) + + It("reconciles openshift peer", func() { + _, err := peer.Reconcile(instance, update) + Expect(err).NotTo(HaveOccurred()) + }) + }) +}) diff --git a/pkg/operatorerrors/errors.go b/pkg/operatorerrors/errors.go new file mode 100644 index 00000000..0ce44d26 --- /dev/null +++ b/pkg/operatorerrors/errors.go @@ -0,0 +1,153 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package operatorerrors + +import ( + "fmt" + + "github.com/go-logr/logr" +) + +const ( + InvalidDeploymentCreateRequest = 1 + InvalidDeploymentUpdateRequest = 2 + InvalidServiceCreateRequest = 3 + InvalidServiceUpdateRequest = 4 + InvalidPVCCreateRequest = 5 + InvalidPVCUpdateRequest = 6 + InvalidConfigMapCreateRequest = 7 + InvalidConfigMapUpdateRequest = 8 + InvalidServiceAccountCreateRequest = 9 + InvalidServiceAccountUpdateRequest = 10 + InvalidRoleCreateRequest = 11 + InvalidRoleUpdateRequest = 12 + InvalidRoleBindingCreateRequest = 13 + InvalidRoleBindingUpdateRequest = 14 + InvalidPeerInitSpec = 15 + InvalidOrdererType = 16 + InvalidOrdererNodeCreateRequest = 17 + InvalidOrdererNodeUpdateRequest = 18 + InvalidOrdererInitSpec = 19 + CAInitilizationFailed = 20 + OrdererInitilizationFailed = 21 + PeerInitilizationFailed = 22 + MigrationFailed = 23 + FabricPeerMigrationFailed = 24 + FabricOrdererMigrationFailed = 25 + InvalidCustomResourceCreateRequest = 26 + FabricCAMigrationFailed = 27 +) + +var ( + BreakingErrors = map[int]*struct{}{ + InvalidDeploymentCreateRequest: nil, + InvalidDeploymentUpdateRequest: nil, + InvalidServiceCreateRequest: nil, + InvalidServiceUpdateRequest: nil, + InvalidPVCCreateRequest: nil, + InvalidPVCUpdateRequest: nil, + InvalidConfigMapCreateRequest: nil, + InvalidConfigMapUpdateRequest: nil, + InvalidServiceAccountCreateRequest: nil, + InvalidServiceAccountUpdateRequest: nil, + InvalidRoleCreateRequest: nil, + InvalidRoleUpdateRequest: nil, + InvalidRoleBindingCreateRequest: nil, + InvalidRoleBindingUpdateRequest: nil, + InvalidPeerInitSpec: nil, + InvalidOrdererType: nil, + InvalidOrdererInitSpec: nil, + CAInitilizationFailed: nil, + OrdererInitilizationFailed: nil, + PeerInitilizationFailed: nil, + FabricPeerMigrationFailed: nil, + FabricOrdererMigrationFailed: nil, + InvalidCustomResourceCreateRequest: nil, + } +) + +type OperatorError struct { + Code int + Message string +} + +func (e *OperatorError) Error() string { + return e.String() +} + +func (e *OperatorError) String() string { + return fmt.Sprintf("Code: %d - %s", e.Code, e.Message) +} + +func New(code int, msg string) *OperatorError { + return &OperatorError{ + Code: code, + Message: msg, + } +} + +func Wrap(err error, code int, msg string) *OperatorError { + return &OperatorError{ + Code: code, + Message: fmt.Sprintf("%s: %s", msg, err.Error()), + } +} + +func IsBreakingError(err error, msg string, log logr.Logger) error { + oerr := IsOperatorError(err) + if oerr == nil { + return err + } + _, breakingError := BreakingErrors[oerr.Code] + if breakingError { + if log != nil { + log.Error(err, fmt.Sprintf("Breaking Error: %s", msg)) + } + return nil + } + return err +} + +func GetErrorCode(err error) int { + oerr := IsOperatorError(err) + if oerr == nil { + return 0 + } + + return oerr.Code +} + +type Causer interface { + Cause() error +} + +// GetCause gets the root cause of the error +func IsOperatorError(err error) *OperatorError { + for err != nil { + switch err.(type) { + case *OperatorError: + return err.(*OperatorError) + case Causer: + err = err.(Causer).Cause() + default: + return nil + } + } + return nil +} diff --git a/pkg/operatorerrors/errors_test.go b/pkg/operatorerrors/errors_test.go new file mode 100644 index 00000000..096ac474 --- /dev/null +++ b/pkg/operatorerrors/errors_test.go @@ -0,0 +1,54 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the 
"License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package operatorerrors_test + +import ( + "errors" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/IBM-Blockchain/fabric-operator/pkg/operatorerrors" + "github.com/go-logr/logr" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var _ = Describe("operator errors", func() { + var ( + operatorErr *operatorerrors.OperatorError + log logr.Logger + ) + + BeforeEach(func() { + operatorErr = operatorerrors.New(operatorerrors.InvalidDeploymentCreateRequest, "operator error occurred") + log = logf.Log.WithName("test") + }) + + Context("breaking error", func() { + It("returns nil if breaking error detected", func() { + err := operatorerrors.IsBreakingError(operatorErr, "operator error", log) + Expect(err).NotTo(HaveOccurred()) + }) + + It("returns err if errors is not an operator error", func() { + err := operatorerrors.IsBreakingError(errors.New("non-operator error"), "not an operator error", log) + Expect(err).To(HaveOccurred()) + }) + }) +}) diff --git a/pkg/operatorerrors/operatorerrors_suite_test.go b/pkg/operatorerrors/operatorerrors_suite_test.go new file mode 100644 index 00000000..c25c246b --- /dev/null +++ b/pkg/operatorerrors/operatorerrors_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package operatorerrors_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestOperatorerrors(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Operatorerrors Suite") +} diff --git a/pkg/restart/configmap/configmap_suite_test.go b/pkg/restart/configmap/configmap_suite_test.go new file mode 100644 index 00000000..6656157b --- /dev/null +++ b/pkg/restart/configmap/configmap_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package configmap_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestConfigmap(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Configmap Suite") +} diff --git a/pkg/restart/configmap/configmap_test.go b/pkg/restart/configmap/configmap_test.go new file mode 100644 index 00000000..6c90a7ce --- /dev/null +++ b/pkg/restart/configmap/configmap_test.go @@ -0,0 +1,107 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package configmap_test + +import ( + "context" + "encoding/json" + "errors" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "sigs.k8s.io/controller-runtime/pkg/client" + + controllermocks "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + "github.com/IBM-Blockchain/fabric-operator/pkg/restart/configmap" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" +) + +var _ = Describe("Configmap", func() { + + var ( + mockClient *controllermocks.Client + manager *configmap.Manager + ) + + BeforeEach(func() { + mockClient = &controllermocks.Client{} + manager = configmap.NewManager(mockClient) + }) + + Context("get restart config from", func() { + It("returns error if fails to get config map", func() { + mockClient.GetReturns(errors.New("fake error")) + err := manager.GetRestartConfigFrom("test-config", "namespace", nil) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to get test-config config map")) + }) + + It("returns error if fails to unmarshal data", func() { + into := &TestConfig{} + mockClient.GetStub = func(ctx context.Context, ns types.NamespacedName, obj client.Object) error { + o := obj.(*corev1.ConfigMap) + o.Name = "test-config" + o.Namespace = ns.Namespace + o.BinaryData = map[string][]byte{ + "restart-config.yaml": []byte("test"), + } + return nil + } + + err := manager.GetRestartConfigFrom("test-config", "namespace", into) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to unmarshal test-config config map")) + }) + + It("unmarshals config map data into struct", func() { + into := &TestConfig{} + mockClient.GetStub = func(ctx context.Context, ns types.NamespacedName, obj client.Object) error { + cfg := &TestConfig{ + Field: "test", + } + bytes, _ := json.Marshal(cfg) + + o := obj.(*corev1.ConfigMap) + o.Name = "test-config" + o.Namespace = ns.Namespace + o.BinaryData = map[string][]byte{ + "restart-config.yaml": bytes, + } + return nil + } + + err := manager.GetRestartConfigFrom("test-config", "namespace", into) + Expect(err).NotTo(HaveOccurred()) + Expect(into.Field).To(Equal("test")) + }) + }) + + It("update config", func() { + mockClient.CreateOrUpdateReturns(errors.New("fake error")) + err := manager.UpdateConfig("test-config", "ns", &TestConfig{}) + 
Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to create or update test-config config map")) + }) +}) + +type TestConfig struct { + Field string +} diff --git a/pkg/restart/configmap/manager.go b/pkg/restart/configmap/manager.go new file mode 100644 index 00000000..930f92fd --- /dev/null +++ b/pkg/restart/configmap/manager.go @@ -0,0 +1,94 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package configmap + +import ( + "context" + "encoding/json" + + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +type Manager struct { + Client k8sclient.Client +} + +func NewManager(client k8sclient.Client) *Manager { + return &Manager{ + Client: client, + } +} + +func (c *Manager) GetRestartConfigFrom(cmName string, namespace string, into interface{}) error { + cm := &corev1.ConfigMap{} + n := types.NamespacedName{ + Name: cmName, + Namespace: namespace, + } + + err := c.Client.Get(context.TODO(), n, cm) + if err != nil { + if !k8serrors.IsNotFound(err) { + return errors.Wrapf(err, "failed to get %s config map", cmName) + } + + // If config map doesn't exist yet, keep into cfg empty + return nil + } + + if cm.BinaryData["restart-config.yaml"] == nil { + return nil + } + + err = json.Unmarshal(cm.BinaryData["restart-config.yaml"], into) + if err != nil { + return errors.Wrapf(err, "failed to unmarshal %s config map", cmName) + } + + return nil +} + +func (c *Manager) UpdateConfig(cmName string, namespace string, cfg interface{}) error { + bytes, err := json.Marshal(cfg) + if err != nil { + return err + } + + cm := &corev1.ConfigMap{ + ObjectMeta: v1.ObjectMeta{ + Name: cmName, + Namespace: namespace, + }, + BinaryData: map[string][]byte{ + "restart-config.yaml": bytes, + }, + } + + err = c.Client.CreateOrUpdate(context.TODO(), cm) + if err != nil { + return errors.Wrapf(err, "failed to create or update %s config map", cmName) + } + + return nil +} diff --git a/pkg/restart/restart.go b/pkg/restart/restart.go new file mode 100644 index 00000000..55c2fbfc --- /dev/null +++ b/pkg/restart/restart.go @@ -0,0 +1,360 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package restart + +import ( + "fmt" + "strings" + "time" + + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common" + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/IBM-Blockchain/fabric-operator/pkg/restart/configmap" + "github.com/IBM-Blockchain/fabric-operator/pkg/restart/staggerrestarts" + "github.com/pkg/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var log = logf.Log.WithName("restart_manager") + +type RestartManager struct { + Client k8sclient.Client + Timers map[string]*time.Timer + WaitTime time.Duration + ConfigMapManager *configmap.Manager + StaggerRestartsService *staggerrestarts.StaggerRestartsService +} + +func New(client k8sclient.Client, waitTime, timeout time.Duration) *RestartManager { + r := &RestartManager{ + Client: client, + Timers: map[string]*time.Timer{}, + WaitTime: waitTime, + ConfigMapManager: configmap.NewManager(client), + StaggerRestartsService: staggerrestarts.New(client, timeout), + } + + return r +} + +func (r *RestartManager) ForAdminCertUpdate(instance v1.Object) error { + return r.updateConfigFor(instance, ADMINCERT) +} + +func (r *RestartManager) ForCertUpdate(certType common.SecretType, instance v1.Object) error { + var err error + switch certType { + case common.TLS: + err = r.ForTLSReenroll(instance) + case common.ECERT: + err = r.ForEcertReenroll(instance) + } + + if err != nil { + return err + } + + return nil +} + +func (r *RestartManager) ForEcertReenroll(instance v1.Object) error { + return r.updateConfigFor(instance, ECERTUPDATE) +} + +func (r *RestartManager) ForTLSReenroll(instance v1.Object) error { + return r.updateConfigFor(instance, TLSUPDATE) +} + +func (r *RestartManager) ForConfigOverride(instance v1.Object) error { + return r.updateConfigFor(instance, CONFIGOVERRIDE) +} + +func (r *RestartManager) ForMigration(instance v1.Object) error { + return r.updateConfigFor(instance, MIGRATION) +} + +func (r *RestartManager) ForNodeOU(instance v1.Object) error { + return r.updateConfigFor(instance, NODEOU) +} + +func (r *RestartManager) ForConfigMapUpdate(instance v1.Object) error { + return r.updateConfigFor(instance, CONFIGMAPUPDATE) +} + +func (r *RestartManager) ForRestartAction(instance v1.Object) error { + return r.updateConfigFor(instance, RESTARTACTION) +} + +// Updates the operator-config for the given reason by setting the request +// status to 'pending' and request timestamp to the current time: +// +// instances[instance_name].Requests[reason].Status = "pending" +func (r *RestartManager) updateConfigFor(instance v1.Object, reason Reason) error { + cfg, err := r.GetConfig(instance) + if err != nil { + return err + } + + if cfg.Instances == nil { + cfg.Instances = map[string]*Restart{} + } + _, ok := cfg.Instances[instance.GetName()] + if !ok { + cfg.Instances[instance.GetName()] = &Restart{} + } + + restart := cfg.Instances[instance.GetName()] + updateRestartRequest(restart, reason) + + log.Info(fmt.Sprintf("Updating operator-config map, %s restart requested due to %s", instance.GetName(), reason)) + err = r.UpdateConfigMap(cfg, instance) + if err != nil { + return err + } + + return nil +} + +func updateRestartRequest(restart *Restart, reason Reason) { + if restart.Requests == nil { + restart.Requests = map[Reason]*Request{} + } + + if restart.Requests[reason] == nil { + restart.Requests[reason] = 
&Request{} + } + + // Set request time + req := restart.Requests[reason] + if req.Status != Pending { + req.Status = Pending + req.RequestTimestamp = time.Now().UTC().Format(time.RFC3339) + } +} + +type Instance interface { + v1.Object + GetMSPID() string +} + +// TriggerIfNeeded checks operator-config for any pending restarts, sets a timer to restart +// the deployment if required, and restarts the deployment. +func (r *RestartManager) TriggerIfNeeded(instance Instance) error { + var trigger bool + + cfg, err := r.GetConfig(instance) + if err != nil { + return err + } + + restart := cfg.Instances[instance.GetName()] + if restart == nil || restart.Requests == nil { + // Do nothing if restart doesn't have any pending requests + return nil + } + + reasonList := []string{} + for reason, req := range restart.Requests { + if req != nil { + if req.Status == Pending { + reasonList = append(reasonList, string(reason)) + if r.triggerRestart(req) { + trigger = true + } + } + + } + } + reasonString := strings.Join(reasonList, ",") + + if trigger { + err = r.RestartDeployment(instance, reasonString) + if err != nil { + return err + } + } else if r.pendingRequests(restart) { + err = r.SetTimer(instance, reasonString) + if err != nil { + return errors.Wrap(err, "failed to set timer to restart deployment") + } + } + + return nil +} + +func (r *RestartManager) triggerRestart(req *Request) bool { + if req != nil { + if req.Status == Pending { + if req.LastActionTimestamp == "" { // no previous restart has occurred + return true + } + + lastRestart, err := time.Parse(time.RFC3339, req.LastActionTimestamp) + if err != nil { + return true + } + + requestedRestart, err := time.Parse(time.RFC3339, req.RequestTimestamp) + if err != nil { + return true + } + + if requestedRestart.Sub(lastRestart) >= r.WaitTime { + return true + } + } + } + + return false +} + +func (r *RestartManager) pendingRequests(restart *Restart) bool { + for _, req := range restart.Requests { + if req.Status == Pending { + return true + } + } + return false +} + +func (r *RestartManager) SetTimer(instance Instance, reason string) error { + cfg, err := r.GetConfig(instance) + if err != nil { + return err + } + + restart := cfg.Instances[instance.GetName()] + + oldestRequestTime := time.Now().UTC() + lastActionTime := "" + // Want to set timer duration based on oldest pending request + for _, req := range restart.Requests { + if req != nil { + requestTime, err := time.Parse(time.RFC3339, req.RequestTimestamp) + if err == nil { + if requestTime.Before(oldestRequestTime) { + oldestRequestTime = requestTime + lastActionTime = req.LastActionTimestamp + } + } + } + } + + // Set timer if not already running + if r.Timers[instance.GetName()] == nil { + dur := r.getTimerDuration(lastActionTime, oldestRequestTime) + log.Info(fmt.Sprintf("Setting timer to restart %s in %f minutes", instance.GetName(), dur.Minutes())) + + r.Timers[instance.GetName()] = time.AfterFunc(dur, func() { + err := r.RestartDeployment(instance, reason) + if err != nil { + log.Error(err, fmt.Sprintf("failed to restart deployment for %s", instance.GetName())) + } + }) + } else { + log.Info(fmt.Sprintf("Timer already set to restart %s shortly", instance.GetName())) + } + + return nil +} + +// If lastRestartTime was less than 10 min (or value of WaitTime) ago, calculate how much +// time remains before WaitTime has passed to trigger next restart +func (r *RestartManager) getTimerDuration(actionTime string, requestTime time.Time) time.Duration { + lastRestartTime, err := 
time.Parse(time.RFC3339, actionTime) + if err != nil { + // Default to WaitTime + return r.WaitTime + } + timePassed := requestTime.Sub(lastRestartTime) + return r.WaitTime - timePassed +} + +// RestartDeployment adds the instance to the queue to stagger restarts +func (r *RestartManager) RestartDeployment(instance Instance, reason string) error { + log.Info(fmt.Sprintf("Queuing instance %s for restart", instance.GetName())) + + err := r.ClearRestartConfigForInstance(instance) + if err != nil { + return errors.Wrap(err, "failed to clear restart config") + } + + err = r.StaggerRestartsService.Restart(instance, reason) + if err != nil { + return errors.Wrap(err, "failed to add restart request to queue") + } + + return nil +} + +func (r *RestartManager) ClearRestartConfigForInstance(instance v1.Object) error { + cfg, err := r.GetConfig(instance) + if err != nil { + return err + } + + if cfg.Instances == nil || cfg.Instances[instance.GetName()] == nil { + return nil + } + + for _, req := range cfg.Instances[instance.GetName()].Requests { + if req != nil && req.Status == Pending { + clearRestart(req) + } + } + + // Stop timer if previously set + if r.Timers[instance.GetName()] != nil { + r.Timers[instance.GetName()].Stop() + r.Timers[instance.GetName()] = nil + } + + err = r.UpdateConfigMap(cfg, instance) + if err != nil { + return err + } + + return nil +} + +func clearRestart(req *Request) { + req.LastActionTimestamp = time.Now().UTC().Format(time.RFC3339) + req.RequestTimestamp = "" + req.Status = Complete +} + +func (r *RestartManager) GetConfig(instance v1.Object) (*Config, error) { + cmName := "operator-config" + + cfg := &Config{} + err := r.ConfigMapManager.GetRestartConfigFrom(cmName, instance.GetNamespace(), cfg) + if err != nil { + return nil, err + } + + return cfg, nil +} + +func (r *RestartManager) UpdateConfigMap(cfg *Config, instance v1.Object) error { + cmName := "operator-config" + + return r.ConfigMapManager.UpdateConfig(cmName, instance.GetNamespace(), cfg) +} diff --git a/pkg/restart/restart_structs.go b/pkg/restart/restart_structs.go new file mode 100644 index 00000000..5e253d91 --- /dev/null +++ b/pkg/restart/restart_structs.go @@ -0,0 +1,54 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package restart + +type Reason string + +const ( + ADMINCERT Reason = "adminCert" + ECERTUPDATE Reason = "ecertUpdate" + TLSUPDATE Reason = "tlsUpdate" + CONFIGOVERRIDE Reason = "configOverride" + MIGRATION Reason = "migration" + NODEOU Reason = "nodeOU" + CONFIGMAPUPDATE Reason = "configMapUpdate" + RESTARTACTION Reason = "restartAction" +) + +type Status string + +const ( + Pending Status = "pending" + Complete Status = "complete" +) + +type Request struct { + Status Status + RequestTimestamp string + LastActionTimestamp string +} + +type Restart struct { + Requests map[Reason]*Request +} + +// Config defines operator-config.BinaryData["restart-config.yaml"] +type Config struct { + Instances map[string]*Restart +} diff --git a/pkg/restart/restart_suite_test.go b/pkg/restart/restart_suite_test.go new file mode 100644 index 00000000..e30d4dbf --- /dev/null +++ b/pkg/restart/restart_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package restart_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestRestart(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Restart Suite") +} diff --git a/pkg/restart/restart_test.go b/pkg/restart/restart_test.go new file mode 100644 index 00000000..0e2bc1ba --- /dev/null +++ b/pkg/restart/restart_test.go @@ -0,0 +1,380 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package restart_test + +import ( + "context" + "encoding/json" + "errors" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "sigs.k8s.io/controller-runtime/pkg/client" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + controllermocks "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/IBM-Blockchain/fabric-operator/pkg/restart" + "github.com/IBM-Blockchain/fabric-operator/pkg/restart/staggerrestarts" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" +) + +var _ = Describe("Restart", func() { + SetDefaultEventuallyTimeout(30 * time.Second) + SetDefaultEventuallyPollingInterval(time.Second) + + var ( + mockClient *controllermocks.Client + instance *current.IBPPeer + + restartManager *restart.RestartManager + + cfg *restart.Config + updatedCfg *restart.Config + testTimestamp string + ) + + BeforeEach(func() { + mockClient = &controllermocks.Client{} + restartManager = restart.New(mockClient, 10*time.Minute, 5*time.Minute) + + instance = ¤t.IBPPeer{} + instance.Name = "peer1" + instance.Namespace = "default" + + testTimestamp = time.Now().UTC().Format(time.RFC3339) + cfg = &restart.Config{ + Instances: map[string]*restart.Restart{ + "peer1": { + Requests: map[restart.Reason]*restart.Request{ + restart.ADMINCERT: { + RequestTimestamp: testTimestamp, + Status: restart.Pending, + }, + }, + }, + "peer2": { + Requests: map[restart.Reason]*restart.Request{ + restart.ADMINCERT: { + LastActionTimestamp: time.Now().Add(-5 * time.Minute).UTC().Format(time.RFC3339), + }, + }, + }, + "peer3": { + Requests: map[restart.Reason]*restart.Request{ + restart.ADMINCERT: { + LastActionTimestamp: time.Now().Add(-5 * time.Second).UTC().Format(time.RFC3339), + RequestTimestamp: testTimestamp, + Status: restart.Pending, + }, + }, + }, + "peer4": { + Requests: map[restart.Reason]*restart.Request{ + restart.ADMINCERT: { + LastActionTimestamp: time.Now().Add(-15 * time.Minute).UTC().Format(time.RFC3339), + RequestTimestamp: testTimestamp, + Status: restart.Pending, + }, + restart.ECERTUPDATE: { + LastActionTimestamp: time.Now().Add(-5 * time.Minute).UTC().Format(time.RFC3339), + RequestTimestamp: testTimestamp, + Status: restart.Pending, + }, + }, + }, + }, + } + + cfgBytes, err := json.Marshal(cfg) + Expect(err).NotTo(HaveOccurred()) + + mockClient.GetStub = func(ctx context.Context, ns types.NamespacedName, obj client.Object) error { + switch obj.(type) { + case *corev1.ConfigMap: + o := obj.(*corev1.ConfigMap) + switch ns.Name { + case "operator-config": + o.Name = "operator-config" + o.Namespace = instance.Namespace + o.BinaryData = map[string][]byte{ + "restart-config.yaml": cfgBytes, + } + } + case *appsv1.Deployment: + o := obj.(*appsv1.Deployment) + o.Name = ns.Name + o.Namespace = instance.Namespace + } + + return nil + } + + updatedCfg = &restart.Config{} + mockClient.CreateOrUpdateStub = func(ctx context.Context, obj client.Object, opts ...k8sclient.CreateOrUpdateOption) error { + o := obj.(*corev1.ConfigMap) + err := json.Unmarshal(o.BinaryData["restart-config.yaml"], updatedCfg) + Expect(err).NotTo(HaveOccurred()) + return nil + } + }) + + Context("get config from config map", func() { + It("returns empty config if config map doesn't exist", func() { + mockClient.GetReturns(k8serrors.NewNotFound(schema.GroupResource{}, "not found")) + config, err := restartManager.GetConfig(instance) + Expect(err).NotTo(HaveOccurred()) + 
Expect(config).To(Equal(&restart.Config{})) + }) + + It("returns error if failed to get existing config map", func() { + mockClient.GetReturns(errors.New("get error")) + _, err := restartManager.GetConfig(instance) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).Should(Equal("failed to get operator-config config map: get error")) + }) + + It("returns error if fails to unmarshal config map", func() { + mockClient.GetStub = func(ctx context.Context, ns types.NamespacedName, obj client.Object) error { + o := obj.(*corev1.ConfigMap) + o.Name = "operator-config" + o.Namespace = instance.Namespace + o.BinaryData = map[string][]byte{ + "restart-config.yaml": []byte("invalid"), + } + return nil + } + _, err := restartManager.GetConfig(instance) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("failed to unmarshal operator-config config map")) + }) + + It("returns restart config from config map", func() { + config, err := restartManager.GetConfig(instance) + Expect(err).NotTo(HaveOccurred()) + Expect(config).To(Equal(cfg)) + }) + }) + + Context("update config map", func() { + It("returns error if fails to update config map", func() { + mockClient.CreateOrUpdateReturns(errors.New("update error")) + err := restartManager.UpdateConfigMap(cfg, instance) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to create or update operator-config config map: update error")) + }) + + It("updates config map", func() { + err := restartManager.UpdateConfigMap(cfg, instance) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("for admin cert update", func() { + It("returns error if fails to get config from config map", func() { + mockClient.GetReturns(errors.New("get error")) + err := restartManager.ForAdminCertUpdate(instance) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to get operator-config config map: get error")) + }) + + It("returns error if fails to update config map", func() { + mockClient.CreateOrUpdateReturns(errors.New("update error")) + err := restartManager.ForAdminCertUpdate(instance) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed to create or update operator-config config map: update error")) + }) + + It("doesn't set RequestTimestamp if already set", func() { + instance.Name = "peer1" + err := restartManager.ForAdminCertUpdate(instance) + Expect(err).NotTo(HaveOccurred()) + Expect(updatedCfg.Instances["peer1"].Requests[restart.ADMINCERT].RequestTimestamp).To(Equal(testTimestamp)) + }) + + It("sets RequestTimestamp if not set for that instance", func() { + instance.Name = "peer2" + err := restartManager.ForAdminCertUpdate(instance) + Expect(err).NotTo(HaveOccurred()) + Expect(updatedCfg.Instances["peer2"].Requests[restart.ADMINCERT].RequestTimestamp).NotTo(Equal("")) + }) + + It("sets RequestTimestamp for instance if instance not yet in config", func() { + instance.Name = "newpeer" + err := restartManager.ForAdminCertUpdate(instance) + Expect(err).NotTo(HaveOccurred()) + Expect(updatedCfg.Instances["newpeer"].Requests[restart.ADMINCERT].RequestTimestamp).NotTo(Equal("")) + }) + + }) + + Context("for ecert reenroll", func() { + It("sets RequestTimestamp for instance if not set for that instance", func() { + err := restartManager.ForEcertReenroll(instance) + Expect(err).NotTo(HaveOccurred()) + Expect(updatedCfg.Instances["peer1"].Requests[restart.ECERTUPDATE].RequestTimestamp).NotTo(Equal("")) + }) + }) + + Context("for tls reenroll", func() { + It("sets RequestTimestamp for instance if 
not set for that instance", func() { + err := restartManager.ForTLSReenroll(instance) + Expect(err).NotTo(HaveOccurred()) + Expect(updatedCfg.Instances["peer1"].Requests[restart.TLSUPDATE].RequestTimestamp).NotTo(Equal("")) + }) + }) + + Context("for config override", func() { + It("sets RequestTimestamp for instance if not set for that instance", func() { + err := restartManager.ForConfigOverride(instance) + Expect(err).NotTo(HaveOccurred()) + Expect(updatedCfg.Instances["peer1"].Requests[restart.CONFIGOVERRIDE].RequestTimestamp).NotTo(Equal("")) + }) + }) + + Context("for migration", func() { + It("sets RequestTimestamp for instance if not set for that instance", func() { + err := restartManager.ForMigration(instance) + Expect(err).NotTo(HaveOccurred()) + Expect(updatedCfg.Instances["peer1"].Requests[restart.MIGRATION].RequestTimestamp).NotTo(Equal("")) + }) + }) + + Context("trigger if needed", func() { + It("returns error if fails to get config map", func() { + mockClient.GetReturns(errors.New("get error")) + err := restartManager.TriggerIfNeeded(instance) + Expect(err).To(HaveOccurred()) + }) + + It("returns nil if instance is not in config map", func() { + instance.Name = "fake peer" + err := restartManager.TriggerIfNeeded(instance) + Expect(err).NotTo(HaveOccurred()) + }) + + It("triggers restart if there are pending restarts and no previous restart", func() { + instance.Name = "peer1" + err := restartManager.TriggerIfNeeded(instance) + Expect(err).NotTo(HaveOccurred()) + + By("clearing restart", func() { + for _, req := range updatedCfg.Instances["peer1"].Requests { + Expect(req.Status).To(Equal(restart.Complete)) + Expect(req.RequestTimestamp).To(Equal("")) + Expect(req.LastActionTimestamp).NotTo(Equal("")) + } + }) + + By("adding restart request to queue", func() { + _, cm, _ := mockClient.CreateOrUpdateArgsForCall(1) + cfgBytes := cm.(*corev1.ConfigMap).BinaryData["restart-config.yaml"] + restartcfg := &staggerrestarts.RestartConfig{} + err = json.Unmarshal(cfgBytes, restartcfg) + Expect(err).NotTo(HaveOccurred()) + + Expect(len(restartcfg.Queues[instance.GetMSPID()])).To(Equal(1)) + Expect(restartcfg.Queues[instance.GetMSPID()][0].CRName).To(Equal(instance.Name)) + Expect(restartcfg.Queues[instance.GetMSPID()][0].Reason).To(Equal("adminCert")) + Expect(restartcfg.Queues[instance.GetMSPID()][0].Status).To(Equal(staggerrestarts.Pending)) + }) + }) + + It("returns nil if there are no pending restarts for instance", func() { + instance.Name = "peer2" + err := restartManager.TriggerIfNeeded(instance) + Expect(err).NotTo(HaveOccurred()) + }) + + It("sets timer if there are pending restarts but last restart action timestamp is sooner than 10 min", func() { + instance.Name = "peer3" + err := restartManager.TriggerIfNeeded(instance) + Expect(err).NotTo(HaveOccurred()) + + By("not updating config map", func() { + Expect(mockClient.CreateOrUpdateCallCount()).To(Equal(0)) + }) + + By("setting timer", func() { + // timer.Stop() == true means that it was set + Expect(restartManager.Timers["peer3"].Stop()).To(Equal(true)) + }) + }) + + It("triggers restart if there are pending restarts and at least one request last action timestamp is more than 10 min ago", func() { + instance.Name = "peer4" + err := restartManager.TriggerIfNeeded(instance) + Expect(err).NotTo(HaveOccurred()) + + By("clearing restart", func() { + for reason, req := range updatedCfg.Instances["peer4"].Requests { + Expect(req.Status).To(Equal(restart.Complete)) + Expect(req.RequestTimestamp).To(Equal("")) + 
Expect(req.LastActionTimestamp).NotTo(Equal(cfg.Instances["peer4"].Requests[reason].LastActionTimestamp)) + } + }) + + By("adding restart request to queue", func() { + _, cm, _ := mockClient.CreateOrUpdateArgsForCall(1) + cfgBytes := cm.(*corev1.ConfigMap).BinaryData["restart-config.yaml"] + restartcfg := &staggerrestarts.RestartConfig{} + err = json.Unmarshal(cfgBytes, restartcfg) + Expect(err).NotTo(HaveOccurred()) + + Expect(len(restartcfg.Queues[instance.GetMSPID()])).To(Equal(1)) + Expect(restartcfg.Queues[instance.GetMSPID()][0].CRName).To(Equal(instance.Name)) + Expect(restartcfg.Queues[instance.GetMSPID()][0].Reason).To(ContainSubstring("adminCert")) + Expect(restartcfg.Queues[instance.GetMSPID()][0].Reason).To(ContainSubstring("ecertUpdate")) + Expect(restartcfg.Queues[instance.GetMSPID()][0].Status).To(Equal(staggerrestarts.Pending)) + }) + }) + }) + + Context("set timer", func() { + BeforeEach(func() { + restartManager.WaitTime = 10 * time.Second + }) + + It("returns error if fails to get config map", func() { + mockClient.GetReturns(errors.New("get error")) + err := restartManager.SetTimer(instance, "") + Expect(err).To(HaveOccurred()) + }) + + It("sets timer for instance if there are pending restarts", func() { + instance.Name = "peer3" + err := restartManager.SetTimer(instance, "") + Expect(err).NotTo(HaveOccurred()) + + // Timer should go off in 5 seconds + time.Sleep(10 * time.Second) + + By("restarting deployment after timer goes off", func() { + Expect(restartManager.Timers["peer3"]).To(BeNil()) + }) + }) + }) + +}) diff --git a/pkg/restart/staggerrestarts/staggerrestarts.go b/pkg/restart/staggerrestarts/staggerrestarts.go new file mode 100644 index 00000000..fe5bafb7 --- /dev/null +++ b/pkg/restart/staggerrestarts/staggerrestarts.go @@ -0,0 +1,408 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package staggerrestarts + +import ( + "context" + "crypto/rand" + "fmt" + "math/big" + "strings" + "time" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + "github.com/IBM-Blockchain/fabric-operator/pkg/action" + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + "github.com/IBM-Blockchain/fabric-operator/pkg/restart/configmap" + "github.com/pkg/errors" + + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/wait" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var log = logf.Log.WithName("stagger_restart_service") + +type Instance interface { + v1.Object + GetMSPID() string +} + +type StaggerRestartsService struct { + Client k8sclient.Client + ConfigMapManager *configmap.Manager + Timeout time.Duration +} + +func New(client k8sclient.Client, timeout time.Duration) *StaggerRestartsService { + return &StaggerRestartsService{ + Client: client, + Timeout: timeout, + ConfigMapManager: configmap.NewManager(client), + } +} + +// Restart is called by the restart manager. +// For CA/Peer/Orderer: adds component to the queue for restart. +// For Console: restarts the component directly as there is only one ibpconsole +// instance per network. We bypass the queue logic for ibpconsoles. +func (s *StaggerRestartsService) Restart(instance Instance, reason string) error { + switch instance.(type) { + case *current.IBPConsole: + if err := s.RestartImmediately("console", instance, reason); err != nil { + return errors.Wrapf(err, "failed to restart %s", instance.GetName()) + } + default: + if err := s.AddToQueue(instance, reason); err != nil { + return errors.Wrapf(err, "failed to add restart request to queue for %s", instance.GetName()) + } + } + + return nil +} + +// AddToQueue is called by the restart manager and handles adding the +// restart request to the queue associated with the instance's MSPID +// in the -restart-config CM. 
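+//
+// Illustrative sketch only: given the RestartConfig and Component types declared
+// in staggerrestarts_structs.go, the JSON written under the "restart-config.yaml"
+// key of the "<componentType>-restart-config" ConfigMap (e.g. "peer-restart-config")
+// roughly looks like the following, with empty fields omitted for brevity:
+//
+//	{
+//	  "Queues": {
+//	    "org1": [
+//	      {"CRName": "org1peer1", "Reason": "migration", "Status": "pending"}
+//	    ]
+//	  }
+//	}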
+func (s *StaggerRestartsService) AddToQueue(instance Instance, reason string) error { + var componentType string + switch instance.(type) { + case *current.IBPCA: + componentType = "ca" + case *current.IBPOrderer: + componentType = "orderer" + case *current.IBPPeer: + componentType = "peer" + + } + + err := wait.Poll(time.Second, 3*time.Second, func() (bool, error) { + err := s.addToQueue(componentType, instance, reason) + if err != nil { + log.Error(err, "failed to add to queue") + return false, nil + } + return true, nil + }) + + if err != nil { + return errors.Wrapf(err, "failed to add %s to queue", instance.GetName()) + } + + return nil +} + +func (s *StaggerRestartsService) addToQueue(componentType string, instance Instance, reason string) error { + component := &Component{ + CRName: instance.GetName(), + Reason: reason, + Status: Pending, + } + + restartConfig, err := s.GetConfig(componentType, instance.GetNamespace()) + if err != nil { + return err + } + + // Add component to queue + restartConfig.AddToQueue(instance.GetMSPID(), component) + + err = s.UpdateConfig(componentType, instance.GetNamespace(), restartConfig) + if err != nil { + return err + } + + return nil +} + +func (s *StaggerRestartsService) RestartImmediately(componentType string, instance Instance, reason string) error { + log.Info(fmt.Sprintf("Restarting %s...", instance.GetName())) + err := s.RestartDeployment(instance.GetName(), instance.GetNamespace()) + if err != nil { + return err + } + + component := &Component{ + CRName: instance.GetName(), + Reason: reason, + Status: Restarted, + LastCheckedTimestamp: time.Now().UTC().String(), + } + + restartConfig, err := s.GetConfig(componentType, instance.GetNamespace()) + if err != nil { + return err + } + restartConfig.AddToLog(component) + + err = s.UpdateConfig(componentType, instance.GetNamespace(), restartConfig) + if err != nil { + return err + } + + return nil +} + +// Reconcile is called by the ca/peer/orderer reconcile loops via the restart +// manager when an update to the -restart-config CM is detected +// and handles the different states of the first component of each queue. +// +// Returns true if the controller needs to requeue the request to reconcile the restart manager. 
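+//
+// In outline, the component at the front of each queue is handled as:
+//
+//	Pending -> record the current pod name, restart the deployment, mark it
+//	           Waiting and stamp CheckUntilTimestamp = now + Timeout
+//	Waiting -> if a new pod is running, mark Completed and move it to the log;
+//	           if CheckUntilTimestamp has passed, mark Expired and move it to
+//	           the log; otherwise keep waiting (requeueing the reconcile when
+//	           the config map was checked too recently to be rewritten)
+//	other   -> already Completed/Expired, move it to the log and pop the queue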
+func (s *StaggerRestartsService) Reconcile(componentType, namespace string) (bool, error) { + requeue := false + + restartConfig, err := s.GetConfig(componentType, namespace) + if err != nil { + return requeue, err + } + + updated := false + // Check front component of each queue + for mspid, queue := range restartConfig.Queues { + if len(queue) == 0 { + // queue is empty - do nothing + continue + } + + component := queue[0] + name := component.CRName + + switch component.Status { + case Pending: + log.Info(fmt.Sprintf("%s in pending status, restarting deployment", component.CRName)) + + // Save pod name + pods, err := s.GetRunningPods(name, namespace) + if err != nil { + return requeue, errors.Wrapf(err, "failed to get running pods for %s", name) + } + + if len(pods) > 0 { + component.PodName = pods[0].Name + } + + // Restart component + err = s.RestartDeployment(name, namespace) + if err != nil { + return requeue, errors.Wrapf(err, "failed to restart deployment %s", name) + } + + // Update config + component.Status = Waiting + component.LastCheckedTimestamp = time.Now().UTC().String() + component.CheckUntilTimestamp = time.Now().Add(s.Timeout).UTC().String() + + updated = true + + case Waiting: + pods, err := s.GetRunningPods(name, namespace) + if err != nil { + return requeue, errors.Wrapf(err, "failed to get running pods for %s", name) + } + + // Scenario 1: the pod has restarted + if len(pods) == 1 { + if component.PodName != pods[0].Name { + // Pod has restarted as the old pod has disappeared + log.Info(fmt.Sprintf("%s in completed status, removing from %s restart queue", component.CRName, mspid)) + component.Status = Completed + + restartConfig.AddToLog(component) + restartConfig.PopFromQueue(mspid) + + log.Info(fmt.Sprintf("Remaining restart queue(s) to reconcile: %s", queuesToString(restartConfig.Queues))) + updated = true + + continue + } + } + + // Scenario 2: the pod has not restarted and the wait period has timed out + checkUntil, err := parseTime(component.CheckUntilTimestamp) + if err != nil { + return requeue, errors.Wrap(err, "failed to parse checkUntilTimestamp") + } + if time.Now().UTC().After(checkUntil) { + log.Info(fmt.Sprintf("%s in expired status, has not restarted within %s", component.CRName, s.Timeout.String())) + // Pod has not restarted within s.timeout, move to log + component.Status = Expired + + restartConfig.AddToLog(component) + restartConfig.PopFromQueue(mspid) + + log.Info(fmt.Sprintf("Remaining restart queue(s) to reconcile: %s", queuesToString(restartConfig.Queues))) + updated = true + + continue + } + + // Scenario 3: the pod has not yet restarted but there is still time remaining + // to wait for the pod to restart. + + // To prevent the restart manager from overwritting the config map and losing + // data, the config map updates that trigger reconciles only occur every 10-30 + // seconds. If the lastCheckedInterval amount of time has not yet passed since + // the lastCheckedTimestamp, then we return true to tell the controllers to + // requeue the request to reconcile the restart config map to ensure that + // a reconcile will occur again even when the config map is not updated. 
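+			// lastCheckedInterval is drawn at random from the 10-30 second range via
+			// randomInt below; LastCheckedTimestamp (and hence the config map) is only
+			// refreshed once that much time has passed, otherwise the request is requeued.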
+ + lastCheckedInterval := time.Duration(randomInt(10, 30)) * time.Second + lastChecked, err := parseTime(component.LastCheckedTimestamp) + if err != nil { + return requeue, errors.Wrap(err, "failed to parse lastCheckedTimestamp") + } + + if lastChecked.Add(lastCheckedInterval).Before(time.Now()) { + component.LastCheckedTimestamp = time.Now().UTC().String() + updated = true + } else { + requeue = true + } + + default: + // Expired or Completed status - should not reach this case as Waiting case handles moving components to log + log.Info(fmt.Sprintf("%s restart status is %s, removing from %s restart queue", component.CRName, component.Status, mspid)) + + restartConfig.AddToLog(component) + restartConfig.PopFromQueue(mspid) + + updated = true + } + } + + if updated { + err = s.UpdateConfig(componentType, namespace, restartConfig) + if err != nil { + return requeue, err + } + } + + return requeue, nil +} + +func (s *StaggerRestartsService) GetConfig(componentType, namespace string) (*RestartConfig, error) { + cmName := fmt.Sprintf("%s-restart-config", componentType) + + cfg := &RestartConfig{ + Queues: map[string][]*Component{}, + } + err := s.ConfigMapManager.GetRestartConfigFrom(cmName, namespace, cfg) + if err != nil { + return nil, err + } + + return cfg, nil +} + +func (s *StaggerRestartsService) UpdateConfig(componentType, namespace string, cfg *RestartConfig) error { + cmName := fmt.Sprintf("%s-restart-config", componentType) + return s.ConfigMapManager.UpdateConfig(cmName, namespace, cfg) +} + +func (s *StaggerRestartsService) RestartDeployment(name, namespace string) error { + log.Info(fmt.Sprintf("Restarting deployment %s", name)) + + err := action.Restart(s.Client, name, namespace) + if err != nil { + return err + } + + return nil +} + +func (s *StaggerRestartsService) GetRunningPods(name, namespace string) ([]corev1.Pod, error) { + pods := []corev1.Pod{} + + labelSelector, err := labels.Parse(fmt.Sprintf("app=%s", name)) + if err != nil { + return pods, errors.Wrap(err, "failed to parse label selector for app name") + } + + listOptions := &client.ListOptions{ + LabelSelector: labelSelector, + Namespace: namespace, + } + + podList := &corev1.PodList{} + err = s.Client.List(context.TODO(), podList, listOptions) + if err != nil { + log.Error(err, "failed to get pod list for %s", name) + // return empty pods list + // NOTE: decided not to return error here since this funtion will be called multiple + // times throughout the process of old pods terminating and new pods starting up. + // We don't want to error out prematurely if this client call isn't able to retrieve + // a list of pods during the restart process. + return pods, nil + } + + for _, pod := range podList.Items { + switch pod.Status.Phase { + case corev1.PodRunning: + containerStatuses := pod.Status.ContainerStatuses + + readyContainers := 0 + numContainers := len(containerStatuses) + + for _, status := range containerStatuses { + // TODO: is it required to check status.Ready? 
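+					// Ready reflects the readiness probe while State.Running only means
+					// the container process has started; requiring both means a pod is
+					// only counted here once every container is actually serving.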
+ if status.Ready && status.State.Running != nil { + readyContainers++ + } + } + if readyContainers == numContainers { + pods = append(pods, pod) + } + } + } + + return pods, nil +} + +func queuesToString(queues map[string][]*Component) string { + lst := []string{} + for org, queue := range queues { + str := org + ": [ " + if org == "" { + // This is a ca queue + str = "[ " + } + for _, comp := range queue { + str += comp.CRName + " " + } + str += " ]" + + lst = append(lst, str) + } + + return strings.Join(lst, ",") +} + +func parseTime(t string) (time.Time, error) { + format := "2006-01-02 15:04:05.999999999 -0700 MST" + return time.Parse(format, t) +} + +// Returns a random integer between min and max. +func randomInt(min, max int) int { + randomNum, _ := rand.Int(rand.Reader, big.NewInt(int64(max-min))) + return int(randomNum.Int64()) + min +} diff --git a/pkg/restart/staggerrestarts/staggerrestarts_structs.go b/pkg/restart/staggerrestarts/staggerrestarts_structs.go new file mode 100644 index 00000000..ba0fcbae --- /dev/null +++ b/pkg/restart/staggerrestarts/staggerrestarts_structs.go @@ -0,0 +1,65 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package staggerrestarts + +// RestartConfig defines -restart-config.Data["restart-config.yaml"] +type RestartConfig struct { + // key is mspid + Queues map[string][]*Component + // key is instance name + Log map[string][]*Component +} + +type Status string + +const ( + Pending Status = "pending" + Waiting Status = "waiting" + Completed Status = "completed" + Expired Status = "expired" + + Restarted Status = "restarted" +) + +type Component struct { + CRName string + Reason string + CheckUntilTimestamp string + LastCheckedTimestamp string + Status Status + PodName string +} + +func (r *RestartConfig) AddToLog(component *Component) { + if r.Log == nil { + r.Log = map[string][]*Component{} + } + r.Log[component.CRName] = append(r.Log[component.CRName], component) +} + +func (r *RestartConfig) AddToQueue(mspid string, component *Component) { + if r.Queues == nil { + r.Queues = map[string][]*Component{} + } + r.Queues[mspid] = append(r.Queues[mspid], component) +} + +func (r *RestartConfig) PopFromQueue(mspid string) { + r.Queues[mspid] = r.Queues[mspid][1:] +} diff --git a/pkg/restart/staggerrestarts/staggerrestarts_suite_test.go b/pkg/restart/staggerrestarts/staggerrestarts_suite_test.go new file mode 100644 index 00000000..ea8d20b5 --- /dev/null +++ b/pkg/restart/staggerrestarts/staggerrestarts_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package staggerrestarts_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestStaggerrestarts(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Staggerrestarts Suite") +} diff --git a/pkg/restart/staggerrestarts/staggerrestarts_test.go b/pkg/restart/staggerrestarts/staggerrestarts_test.go new file mode 100644 index 00000000..9100fbe0 --- /dev/null +++ b/pkg/restart/staggerrestarts/staggerrestarts_test.go @@ -0,0 +1,389 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package staggerrestarts_test + +import ( + "context" + "encoding/json" + "errors" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + controllermocks "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + "github.com/IBM-Blockchain/fabric-operator/pkg/restart/staggerrestarts" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + k8sclient "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = Describe("Staggerrestarts", func() { + + var ( + mockClient *controllermocks.Client + service *staggerrestarts.StaggerRestartsService + instance *current.IBPPeer + ) + + BeforeEach(func() { + mockClient = &controllermocks.Client{} + service = staggerrestarts.New(mockClient, 5*time.Minute) + + instance = ¤t.IBPPeer{} + instance.Name = "org1peer1" + instance.Namespace = "namespace" + instance.Spec.MSPID = "org1" + }) + + Context("add to queue", func() { + It("returns error if failed to get restart config", func() { + mockClient.GetReturns(errors.New("get error")) + err := service.AddToQueue(instance, "reason") + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to add org1peer1 to queue")) + }) + + It("returns error if failed to update restart config", func() { + mockClient.CreateOrUpdateReturns(errors.New("update error")) + err := service.AddToQueue(instance, "reason") + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to add org1peer1 to queue")) + }) + + It("adds restart request to queue in restart config", func() { + err := service.AddToQueue(instance, "reason") + Expect(err).NotTo(HaveOccurred()) + + _, cm, _ := mockClient.CreateOrUpdateArgsForCall(0) + cfg := getRestartConfig(cm.(*corev1.ConfigMap)) + + 
Expect(len(cfg.Queues["org1"])).To(Equal(1)) + comp := cfg.Queues["org1"][0] + Expect(comp.CRName).To(Equal("org1peer1")) + Expect(comp.Reason).To(Equal("reason")) + Expect(comp.Status).To(Equal(staggerrestarts.Pending)) + + }) + }) + + Context("reconcile", func() { + var ( + restartConfig *staggerrestarts.RestartConfig + component1 *staggerrestarts.Component + component2 *staggerrestarts.Component + component3 *staggerrestarts.Component + + pod *corev1.Pod + ) + + BeforeEach(func() { + component1 = &staggerrestarts.Component{ + CRName: "org1peer1", + Reason: "migration", + Status: staggerrestarts.Pending, + } + component2 = &staggerrestarts.Component{ + CRName: "org1peer2", + Reason: "migration", + Status: staggerrestarts.Pending, + } + component3 = &staggerrestarts.Component{ + CRName: "org2peer1", + Reason: "migration", + Status: staggerrestarts.Pending, + } + + restartConfig = &staggerrestarts.RestartConfig{ + Queues: map[string][]*staggerrestarts.Component{ + "org1": {component1, component2}, + "org2": {component3}, + }, + } + + pod = &corev1.Pod{ + ObjectMeta: v1.ObjectMeta{ + Name: "pod1", + }, + Status: corev1.PodStatus{ + ContainerStatuses: []corev1.ContainerStatus{ + { + Ready: true, + State: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{}, + }, + }, + }, + Phase: corev1.PodRunning, + }, + } + + bytes, err := json.Marshal(restartConfig) + Expect(err).NotTo(HaveOccurred()) + + mockClient.GetStub = func(ctx context.Context, ns types.NamespacedName, obj client.Object) error { + switch obj.(type) { + case *corev1.ConfigMap: + o := obj.(*corev1.ConfigMap) + o.Name = ns.Name + o.Namespace = instance.Namespace + o.BinaryData = map[string][]byte{ + "restart-config.yaml": bytes, + } + case *appsv1.Deployment: + o := obj.(*appsv1.Deployment) + o.Name = ns.Name + o.Namespace = instance.Namespace + } + + return nil + } + + mockClient.ListStub = func(ctx context.Context, obj client.ObjectList, opts ...k8sclient.ListOption) error { + switch obj.(type) { + case *corev1.PodList: + pods := obj.(*corev1.PodList) + pods.Items = []corev1.Pod{*pod} + } + return nil + } + }) + + Context("pending", func() { + It("returns empty pod list if failed to get running pods", func() { + mockClient.ListReturns(errors.New("list error")) + requeue, err := service.Reconcile("peer", "namespace") + Expect(err).NotTo(HaveOccurred()) + Expect(requeue).To(Equal(false)) + + _, cm, _ := mockClient.CreateOrUpdateArgsForCall(0) + cfg := getRestartConfig(cm.(*corev1.ConfigMap)) + + By("restarting first component in queue but not setting Pod Name", func() { + Expect(cfg.Queues["org1"][0].CRName).To(Equal("org1peer1")) + Expect(cfg.Queues["org1"][0].Status).To(Equal(staggerrestarts.Waiting)) + Expect(cfg.Queues["org1"][0].PodName).To(Equal("")) + }) + }) + + It("returns error if fails to restart deployment", func() { + mockClient.PatchReturns(errors.New("patch error")) + requeue, err := service.Reconcile("peer", "namespace") + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to restart deployment")) + Expect(requeue).To(Equal(false)) + }) + + It("restarts deployment for pending component", func() { + requeue, err := service.Reconcile("peer", "namespace") + Expect(err).NotTo(HaveOccurred()) + Expect(requeue).To(Equal(false)) + + _, cm, _ := mockClient.CreateOrUpdateArgsForCall(0) + cfg := getRestartConfig(cm.(*corev1.ConfigMap)) + + By("restarting first component in org1 queue", func() { + Expect(cfg.Queues["org1"][0].CRName).To(Equal("org1peer1")) + 
Expect(cfg.Queues["org1"][0].Status).To(Equal(staggerrestarts.Waiting)) + Expect(cfg.Queues["org1"][0].PodName).To(Equal("pod1")) + Expect(cfg.Queues["org1"][0].LastCheckedTimestamp).NotTo(Equal("")) + Expect(cfg.Queues["org1"][0].CheckUntilTimestamp).NotTo(Equal("")) + }) + + By("restarting first component in org2 queue", func() { + Expect(cfg.Queues["org2"][0].CRName).To(Equal("org2peer1")) + Expect(cfg.Queues["org2"][0].Status).To(Equal(staggerrestarts.Waiting)) + Expect(cfg.Queues["org2"][0].PodName).To(Equal("pod1")) + Expect(cfg.Queues["org2"][0].LastCheckedTimestamp).NotTo(Equal("")) + Expect(cfg.Queues["org2"][0].CheckUntilTimestamp).NotTo(Equal("")) + }) + + }) + }) + + Context("waiting", func() { + var ( + originalLastChecked string + ) + BeforeEach(func() { + originalLastChecked = time.Now().Add(-35 * time.Second).UTC().String() + checkUntil := time.Now().Add(5 * time.Minute).UTC().String() + + component1.Status = staggerrestarts.Waiting + component1.LastCheckedTimestamp = originalLastChecked + component1.CheckUntilTimestamp = checkUntil + component1.PodName = "pod1" + + component3.Status = staggerrestarts.Waiting + component3.LastCheckedTimestamp = originalLastChecked + component3.CheckUntilTimestamp = checkUntil + component3.PodName = "pod1" + + // Make sure returned restartConfig contains updated components + bytes, err := json.Marshal(restartConfig) + Expect(err).NotTo(HaveOccurred()) + + mockClient.GetStub = func(ctx context.Context, ns types.NamespacedName, obj client.Object) error { + o := obj.(*corev1.ConfigMap) + o.Name = ns.Name + o.Namespace = instance.Namespace + o.BinaryData = map[string][]byte{ + "restart-config.yaml": bytes, + } + + return nil + } + }) + + It("keeps components in Waiting status if unable to get list of pods", func() { + mockClient.ListReturns(errors.New("list error")) + requeue, err := service.Reconcile("peer", "namespace") + Expect(err).NotTo(HaveOccurred()) + + By("returning false to requeue the restart reconcile request if LastCheckedTimestamp was last updated more than 10-30 seconds ago", func() { + Expect(requeue).To(Equal(false)) + }) + + _, cm, _ := mockClient.CreateOrUpdateArgsForCall(0) + cfg := getRestartConfig(cm.(*corev1.ConfigMap)) + + By("keeping first component of each queue in Waiting status and updating LastCheckedTime", func() { + for _, q := range cfg.Queues { + comp := q[0] + Expect(comp.Status).To(Equal(staggerrestarts.Waiting)) + Expect(comp.PodName).To(Equal("pod1")) + Expect(comp.LastCheckedTimestamp).NotTo(Equal(originalLastChecked)) + } + }) + }) + + It("keeps components in Waiting status if there is more than one running pod for the instance", func() { + pod2 := pod.DeepCopy() + pod2.Name = "pod2" + mockClient.ListStub = func(ctx context.Context, obj client.ObjectList, opts ...k8sclient.ListOption) error { + switch obj.(type) { + case *corev1.PodList: + pods := obj.(*corev1.PodList) + pods.Items = []corev1.Pod{*pod, *pod2} + } + return nil + } + + requeue, err := service.Reconcile("peer", "namespace") + Expect(err).NotTo(HaveOccurred()) + + By("returning false to requeue the restart reconcile request if LastCheckedTimestamp was last updated more than 10-30 seconds ago", func() { + Expect(requeue).To(Equal(false)) + }) + + _, cm, _ := mockClient.CreateOrUpdateArgsForCall(0) + cfg := getRestartConfig(cm.(*corev1.ConfigMap)) + + By("keeping first component of each queue in Waiting status and updating LastCheckedTime", func() { + for _, q := range cfg.Queues { + comp := q[0] + 
Expect(comp.Status).To(Equal(staggerrestarts.Waiting)) + Expect(comp.PodName).To(Equal("pod1")) + Expect(comp.LastCheckedTimestamp).NotTo(Equal(originalLastChecked)) + } + }) + }) + + It("sets component to Completed and moves it to the log if pod has restarted", func() { + pod.Name = "newpod" + + requeue, err := service.Reconcile("peer", "namespace") + Expect(err).NotTo(HaveOccurred()) + Expect(requeue).To(Equal(false)) + + _, cm, _ := mockClient.CreateOrUpdateArgsForCall(0) + cfg := getRestartConfig(cm.(*corev1.ConfigMap)) + + By("removing the component from its queue", func() { + Expect(len(cfg.Queues["org1"])).To(Equal(1)) // now contains only org1peer2 + Expect(len(cfg.Queues["org2"])).To(Equal(0)) // now contains no peers + }) + + By("moving the component to the log and setting status to Completed", func() { + Expect(len(cfg.Log)).To(Equal(2)) // since both org1peer1 and org2peer1 restarted + Expect(len(cfg.Log["org1peer1"])).To(Equal(1)) + Expect(len(cfg.Log["org2peer1"])).To(Equal(1)) + + for _, components := range cfg.Log { + Expect(components[0].CRName).To(ContainSubstring("peer1")) + Expect(components[0].Status).To(Equal(staggerrestarts.Completed)) + } + }) + }) + + It("sets component to Expired and moves it to the log if pod has not restarted within timeout window", func() { + component1.CheckUntilTimestamp = time.Now().Add(-5 * time.Second).UTC().String() + bytes, err := json.Marshal(restartConfig) + Expect(err).NotTo(HaveOccurred()) + + mockClient.GetStub = func(ctx context.Context, ns types.NamespacedName, obj client.Object) error { + o := obj.(*corev1.ConfigMap) + o.Name = ns.Name + o.Namespace = instance.Namespace + o.BinaryData = map[string][]byte{ + "restart-config.yaml": bytes, + } + + return nil + } + + requeue, err := service.Reconcile("peer", "namespace") + Expect(err).NotTo(HaveOccurred()) + Expect(requeue).To(Equal(false)) + + _, cm, _ := mockClient.CreateOrUpdateArgsForCall(0) + cfg := getRestartConfig(cm.(*corev1.ConfigMap)) + + By("removing org1peer1 from its queue", func() { + Expect(len(cfg.Queues["org1"])).To(Equal(1)) // now contains only org1peer2 + }) + + By("keeping org2peer1 in its queue as it's timeout window has not expired yet", func() { + Expect(len(cfg.Queues["org2"])).To(Equal(1)) + }) + + By("moving org1peer1 to the log and setting Status to Expired", func() { + Expect(len(cfg.Log["org1peer1"])).To(Equal(1)) + comp1 := cfg.Log["org1peer1"][0] + Expect(comp1.CRName).To(Equal("org1peer1")) + Expect(comp1.Status).To(Equal(staggerrestarts.Expired)) + }) + }) + }) + }) +}) + +func getRestartConfig(cm *corev1.ConfigMap) *staggerrestarts.RestartConfig { + cfgBytes := cm.BinaryData["restart-config.yaml"] + cfg := &staggerrestarts.RestartConfig{} + err := json.Unmarshal(cfgBytes, cfg) + Expect(err).NotTo(HaveOccurred()) + + return cfg +} diff --git a/pkg/util/image/image.go b/pkg/util/image/image.go new file mode 100644 index 00000000..13a04dbb --- /dev/null +++ b/pkg/util/image/image.go @@ -0,0 +1,62 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package image + +import ( + "fmt" + "strings" +) + +func Format(image, tag string) string { + if !strings.HasPrefix(tag, "sha256:") { + return fmt.Sprintf("%s:%s", image, tag) + } else { + return fmt.Sprintf("%s@%s", image, tag) + } +} + +func GetImage(registryURL, image, requestedImage string) string { + // if requested image is passed use it + // else fallback to using default image + if requestedImage != "" { + image = requestedImage + } + + if image != "" { + // if registry url is empty or set to `no-registry-url` return image as is + if registryURL == "" || registryURL == "no-registry-url" || registryURL == "no-registry-url/" { + // use the image as is + return image + } + // else pre-pend registry url to image + image = registryURL + image + } + + return image +} + +func GetTag(arch, tag, requestedTag string) string { + // if override is passed return it + // else return default + if requestedTag != "" { + return requestedTag + } + + return tag +} diff --git a/pkg/util/merge/merge.go b/pkg/util/merge/merge.go new file mode 100644 index 00000000..fd57acc4 --- /dev/null +++ b/pkg/util/merge/merge.go @@ -0,0 +1,61 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package merge + +import ( + "reflect" + + "github.com/imdario/mergo" +) + +// BoolTransformer will overwrite the behavior of merging boolean pointers such that +// a pointer to 'false' is not considered an empty value. Therefore, if the src's +// boolean pointer is not nil, it should overwrite the dst's boolean pointer value. +// +// This is required because the default behavior of mergo is to treat a pointer to 'false' +// as an empty value, which prevents boolean fields to be set from 'true' to 'false' if needed. +type BoolTransformer struct{} + +func (t BoolTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { + falseVal := false + if typ == reflect.TypeOf(&falseVal) { + return func(dst, src reflect.Value) error { + if dst.CanSet() && !src.IsNil() { + dst.Set(src) + } + return nil + } + } + return nil +} + +// TODO: Can add transformers for other primitive types (i.e. int, string) if we run into +// issues setting non-empty primitive fields back to empty values - see unit tests for +// use cases. + +// WithOverwrite encapsulates mergo's implementation of MergeWithOverwrite with our +// custom transformers. 
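+//
+// Illustrative sketch (the Flags type is hypothetical, not part of this package):
+//
+//	type Flags struct{ Enabled *bool }
+//	t, f := true, false
+//	dst := &Flags{Enabled: &t}
+//	err := WithOverwrite(dst, &Flags{Enabled: &f})
+//	// err == nil and *dst.Enabled is now false; per the note above, plain
+//	// mergo.MergeWithOverwrite would treat &f as empty and leave it true.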
+func WithOverwrite(dst interface{}, src interface{}) error { + err := mergo.MergeWithOverwrite(dst, src, mergo.WithTransformers(BoolTransformer{})) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/util/merge/merge_suite_test.go b/pkg/util/merge/merge_suite_test.go new file mode 100644 index 00000000..10fc32a6 --- /dev/null +++ b/pkg/util/merge/merge_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package merge_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestMerge(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Merge Suite") +} diff --git a/pkg/util/merge/merge_test.go b/pkg/util/merge/merge_test.go new file mode 100644 index 00000000..867c7b40 --- /dev/null +++ b/pkg/util/merge/merge_test.go @@ -0,0 +1,216 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package merge_test + +import ( + "github.com/IBM-Blockchain/fabric-operator/pkg/util/merge" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("Merge", func() { + + var ( + dst *Test + src *Test + + trueVal = true + falseVal = false + ) + + BeforeEach(func() { + dst = &Test{ + String: "string", + Int: 1, + Bool: false, + BoolPtr: &falseVal, + } + + src = &Test{} + }) + + Context("WithOverride", func() { + Context("string", func() { + When("src is not an empty string", func() { + It("merges string field by overwriting dst with src", func() { + src.String = "test" + + err := merge.WithOverwrite(dst, src) + Expect(err).NotTo(HaveOccurred()) + Expect(dst.String).To(Equal("test")) + }) + }) + + When("src is an empty string", func() { + // NOTE: This is the expected behavior as defined by mergo.MergeWithOverwrite. + // If we allow empty values to be merged, then all instances of empty src attributes + // would overwrite both non-empty and empty dst attributes, which would possible + // overwrite dst fields we didn't want set back to an empty value. 
+ It("does not merge string field", func() { + src.String = "" + + err := merge.WithOverwrite(dst, src) + Expect(err).NotTo(HaveOccurred()) + Expect(dst.String).To(Equal("string")) + }) + }) + }) + + Context("int", func() { + When("src is not an empty value (0)", func() { + It("merges int field by overwriting dst with src", func() { + src.Int = 2 + + err := merge.WithOverwrite(dst, src) + Expect(err).NotTo(HaveOccurred()) + Expect(dst.Int).To(Equal(2)) + }) + }) + + When("src is an empty value (0)", func() { + // NOTE: This is the expected behavior as defined by mergo.MergeWithOverwrite. + // If we allow empty values to be merged, then all instances of empty src attributes + // would overwrite both non-empty and empty dst attributes, which would possible + // overwrite dst fields we didn't want set back to an empty value. + It("does not merge int field", func() { + src.Int = 0 + + err := merge.WithOverwrite(dst, src) + Expect(err).NotTo(HaveOccurred()) + Expect(dst.Int).To(Equal(1)) + }) + }) + }) + + Context("bool", func() { + When("src is not an empty value (i.e. true)", func() { + It("merges bool field by overwriting dst with src", func() { + src.Bool = true + + err := merge.WithOverwrite(dst, src) + Expect(err).NotTo(HaveOccurred()) + Expect(dst.Bool).To(Equal(true)) + }) + }) + + When("src is an empty value (i.e. false)", func() { + // NOTE: This is the expected behavior as defined by mergo.MergeWithOverwrite. + // If we allow empty values to be merged, then all instances of empty src attributes + // would overwrite both non-empty and empty dst attributes, which would possible + // overwrite dst fields we didn't want set back to an empty value. + It("does not merge bool field", func() { + dst.Bool = true + src.Bool = false + + err := merge.WithOverwrite(dst, src) + Expect(err).NotTo(HaveOccurred()) + Expect(dst.Bool).To(Equal(true)) + }) + }) + }) + + Context("bool pointer", func() { + When("src is a pointer to 'true'", func() { + BeforeEach(func() { + // Reset dst and src to avoid issues with bool pointers + // unintentially persisting through test suite + dst = &Test{} + src = &Test{} + }) + + It("merges bool pointer field by overwriting non-nil dst with src", func() { + src.BoolPtr = &trueVal + + err := merge.WithOverwrite(dst, src) + Expect(err).NotTo(HaveOccurred()) + Expect(*dst.BoolPtr).To(Equal(true)) + }) + It("merges bool pointer field by overwriting nil dst with src", func() { + dst.BoolPtr = nil + src.BoolPtr = &trueVal + + err := merge.WithOverwrite(dst, src) + Expect(err).NotTo(HaveOccurred()) + Expect(*dst.BoolPtr).To(Equal(true)) + }) + }) + + When("src is a pointer to 'false'", func() { + BeforeEach(func() { + // Reset dst and src to avoid issues with bool pointers + // unintentially persisting through test suite + dst = &Test{} + src = &Test{} + }) + + It("merges bool pointer to field by overwriting non-nil dst with src", func() { + dst.BoolPtr = &trueVal + src.BoolPtr = &falseVal + + err := merge.WithOverwrite(dst, src) + Expect(err).NotTo(HaveOccurred()) + Expect(*dst.BoolPtr).To(Equal(false)) + }) + + It("merges bool pointer field by ovewriting nil dst with src", func() { + dst.BoolPtr = nil + src.BoolPtr = &falseVal + + err := merge.WithOverwrite(dst, src) + Expect(err).NotTo(HaveOccurred()) + Expect(*dst.BoolPtr).To(Equal(false)) + }) + + It("merges bool pointer field only if pointer is not nil in src", func() { + dst = &Test{ + BoolPtr: &trueVal, + BoolTest: BoolTest{ + BoolPtrA: &trueVal, + }, + } + src = &Test{ + BoolTest: BoolTest{ + BoolPtrA: &falseVal, + 
}, + } + + err := merge.WithOverwrite(dst, src) + Expect(err).NotTo(HaveOccurred()) + Expect(*dst.BoolTest.BoolPtrA).To(Equal(false)) + Expect(*dst.BoolPtr).To(Equal(true)) + Expect(dst.BoolTest.BoolPtrB).To(BeNil()) + }) + }) + }) + + }) +}) + +type Test struct { + String string + Int int + Bool bool + BoolPtr *bool + BoolTest BoolTest +} + +type BoolTest struct { + BoolPtrA *bool + BoolPtrB *bool +} diff --git a/pkg/util/pointer/pointer.go b/pkg/util/pointer/pointer.go new file mode 100644 index 00000000..2c974d89 --- /dev/null +++ b/pkg/util/pointer/pointer.go @@ -0,0 +1,28 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package pointer + +func True() *bool { + trueVal := true + return &trueVal +} + +func False() *bool { + falseVal := false + return &falseVal +} diff --git a/pkg/util/testdata/invalid_kind.yaml b/pkg/util/testdata/invalid_kind.yaml new file mode 100644 index 00000000..0e8add06 --- /dev/null +++ b/pkg/util/testdata/invalid_kind.yaml @@ -0,0 +1 @@ +kind: 2 diff --git a/pkg/util/util.go b/pkg/util/util.go new file mode 100644 index 00000000..f5671b0d --- /dev/null +++ b/pkg/util/util.go @@ -0,0 +1,940 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package util + +import ( + "context" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "encoding/json" + "encoding/pem" + "fmt" + "io/ioutil" + "math/big" + "net" + "net/http" + "net/url" + "os" + "path" + "path/filepath" + "strings" + "time" + + "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/clientset" + routev1 "github.com/openshift/api/route/v1" + "github.com/pkg/errors" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + networkingv1beta1 "k8s.io/api/networking/v1beta1" + rbacv1 "k8s.io/api/rbac/v1" + extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/apimachinery/pkg/version" + "k8s.io/client-go/rest" + k8sclient "sigs.k8s.io/controller-runtime/pkg/client" + yaml1 "sigs.k8s.io/yaml" +) + +const ( + maximumCRNameLength = 32 +) + +func ConvertYamlFileToJson(file string) ([]byte, error) { + absfilepath, err := filepath.Abs(file) + if err != nil { + return nil, err + } + bytes, err := ioutil.ReadFile(filepath.Clean(absfilepath)) + if err != nil { + return nil, err + } + + return yaml.ToJSON(bytes) +} + +func GetContainerFromFile(file string) (*corev1.Container, error) { + jsonBytes, err := ConvertYamlFileToJson(file) + if err != nil { + return nil, err + } + + cont := &corev1.Container{} + err = json.Unmarshal(jsonBytes, &cont) + if err != nil { + return nil, err + } + + return cont, nil +} + +func GetPVCFromFile(file string) (*corev1.PersistentVolumeClaim, error) { + jsonBytes, err := ConvertYamlFileToJson(file) + if err != nil { + return nil, err + } + + pvc := &corev1.PersistentVolumeClaim{} + err = json.Unmarshal(jsonBytes, &pvc) + if err != nil { + return nil, err + } + + return pvc, nil +} + +func GetRoleFromFile(file string) (*rbacv1.Role, error) { + jsonBytes, err := ConvertYamlFileToJson(file) + if err != nil { + return nil, err + } + + role := &rbacv1.Role{} + err = json.Unmarshal(jsonBytes, &role) + if err != nil { + return nil, err + } + + return role, nil +} + +func GetClusterRoleFromFile(file string) (*rbacv1.ClusterRole, error) { + jsonBytes, err := ConvertYamlFileToJson(file) + if err != nil { + return nil, err + } + + role := &rbacv1.ClusterRole{} + err = json.Unmarshal(jsonBytes, &role) + if err != nil { + return nil, err + } + + return role, nil +} + +func GetRoleBindingFromFile(file string) (*rbacv1.RoleBinding, error) { + jsonBytes, err := ConvertYamlFileToJson(file) + if err != nil { + return nil, err + } + + rolebinding := &rbacv1.RoleBinding{} + err = json.Unmarshal(jsonBytes, &rolebinding) + if err != nil { + return nil, err + } + + return rolebinding, nil +} + +func GetClusterRoleBindingFromFile(file string) (*rbacv1.ClusterRoleBinding, error) { + jsonBytes, err := ConvertYamlFileToJson(file) + if err != nil { + return nil, err + } + + rolebinding := &rbacv1.ClusterRoleBinding{} + err = json.Unmarshal(jsonBytes, &rolebinding) + if err != nil { + return nil, err + } + + return rolebinding, nil +} + +func GetServiceAccountFromFile(file string) (*corev1.ServiceAccount, error) { + jsonBytes, err := ConvertYamlFileToJson(file) + if err != nil { + return nil, err + } + + serviceaccount := &corev1.ServiceAccount{} + err = json.Unmarshal(jsonBytes, &serviceaccount) + if err != nil { + return nil, err + } + + return serviceaccount, nil +} + +func GetDeploymentFromFile(file string) (*appsv1.Deployment, 
error) { + jsonBytes, err := ConvertYamlFileToJson(file) + if err != nil { + return nil, err + } + + dep := &appsv1.Deployment{} + err = json.Unmarshal(jsonBytes, &dep) + if err != nil { + return nil, err + } + + return dep, nil +} + +func GetServiceFromFile(file string) (*corev1.Service, error) { + jsonBytes, err := ConvertYamlFileToJson(file) + if err != nil { + return nil, err + } + + svc := &corev1.Service{} + err = json.Unmarshal(jsonBytes, &svc) + if err != nil { + return nil, err + } + + return svc, nil +} + +func GetConfigMapFromFile(file string) (*corev1.ConfigMap, error) { + absfilepath, err := filepath.Abs(file) + if err != nil { + return nil, err + } + bytes, err := ioutil.ReadFile(filepath.Clean(absfilepath)) + if err != nil { + return nil, err + } + cm := &corev1.ConfigMap{} + err = yaml1.Unmarshal(bytes, cm) + if err != nil { + return nil, err + } + + return cm, nil +} + +func GetRouteFromFile(file string) (*routev1.Route, error) { + jsonBytes, err := ConvertYamlFileToJson(file) + if err != nil { + return nil, err + } + + route := &routev1.Route{} + err = json.Unmarshal(jsonBytes, &route) + if err != nil { + return nil, err + } + + return route, nil +} + +func GetIngressFromFile(file string) (*networkingv1.Ingress, error) { + jsonBytes, err := ConvertYamlFileToJson(file) + if err != nil { + return nil, err + } + + ingress := &networkingv1.Ingress{} + err = json.Unmarshal(jsonBytes, &ingress) + if err != nil { + return nil, err + } + + return ingress, nil +} + +func GetIngressv1beta1FromFile(file string) (*networkingv1beta1.Ingress, error) { + jsonBytes, err := ConvertYamlFileToJson(file) + if err != nil { + return nil, err + } + + ingress := &networkingv1beta1.Ingress{} + err = json.Unmarshal(jsonBytes, &ingress) + if err != nil { + return nil, err + } + + return ingress, nil +} + +func GetSecretFromFile(file string) (*corev1.Secret, error) { + jsonBytes, err := ConvertYamlFileToJson(file) + if err != nil { + return nil, err + } + + secret := &corev1.Secret{} + err = json.Unmarshal(jsonBytes, &secret) + if err != nil { + return nil, err + } + + return secret, nil +} + +func GetCRDFromFile(file string) (*extv1.CustomResourceDefinition, error) { + jsonBytes, err := ConvertYamlFileToJson(file) + if err != nil { + return nil, err + } + + crd := &extv1.CustomResourceDefinition{} + err = json.Unmarshal(jsonBytes, &crd) + if err != nil { + return nil, err + } + + return crd, nil +} + +func GetPodFromFile(file string) (*corev1.Pod, error) { + jsonBytes, err := ConvertYamlFileToJson(file) + if err != nil { + return nil, err + } + + pod := &corev1.Pod{} + err = json.Unmarshal(jsonBytes, &pod) + if err != nil { + return nil, err + } + + return pod, nil +} + +func GetResourcePatch(current, new *corev1.ResourceRequirements) (*corev1.ResourceRequirements, error) { + currentBytes, err := json.Marshal(current) + if err != nil { + return nil, err + } + + newBytes, err := json.Marshal(new) + if err != nil { + return nil, err + } + + patchBytes, err := strategicpatch.StrategicMergePatch(currentBytes, newBytes, corev1.ResourceRequirements{}) + if err != nil { + return nil, err + } + + update := &corev1.ResourceRequirements{} + err = json.Unmarshal(patchBytes, update) + if err != nil { + return nil, err + } + + return update, nil +} + +func IgnoreAlreadyExistError(err error) error { + if !strings.Contains(err.Error(), "already exists") { + return err + } + return nil +} + +// Ignore benign error +func IgnoreOutdatedResourceVersion(err error) error { + if err == nil { + return nil + } + + if 
!strings.Contains(err.Error(), "please apply your changes to the latest version and try again") { + return err + } + + return nil +} + +func EnvExists(envs []corev1.EnvVar, key string) bool { + for _, ele := range envs { + if ele.Name == key { + return true + } + } + return false +} + +func GetEnvValue(envs []corev1.EnvVar, key string) string { + for _, ele := range envs { + if ele.Name == key { + return ele.Value + } + } + return "" +} + +func ReplaceEnvIfDiff(envs []corev1.EnvVar, key, replace string) ([]corev1.EnvVar, bool) { + var updated bool + for _, ele := range envs { + if ele.Name == key { + oldValue := ele.Value + if oldValue != replace { + envs = UpdateEnvVar(ele.Name, replace, envs) + updated = true + } + } + } + return envs, updated +} + +func AppendStringIfMissing(array []string, newEle string) []string { + for _, ele := range array { + if ele == newEle { + return array + } + } + return append(array, newEle) +} + +func AppendEnvIfMissing(envs []corev1.EnvVar, env corev1.EnvVar) []corev1.EnvVar { + for _, ele := range envs { + if ele.Name == env.Name { + return envs + } + } + return append(envs, env) +} + +func AppendPullSecretIfMissing(pullSecrets []corev1.LocalObjectReference, pullSecret string) []corev1.LocalObjectReference { + for _, ps := range pullSecrets { + if ps.Name == pullSecret { + return pullSecrets + } + } + return append(pullSecrets, corev1.LocalObjectReference{Name: pullSecret}) +} + +func AppendEnvIfMissingOverrideIfPresent(envs []corev1.EnvVar, env corev1.EnvVar) []corev1.EnvVar { + for index, ele := range envs { + if ele.Name == env.Name { + ele.Value = env.Value + envs[index] = ele + return envs + } + } + return append(envs, env) +} + +func AppendConfigMapFromSourceIfMissing(envFroms []corev1.EnvFromSource, envFrom corev1.EnvFromSource) []corev1.EnvFromSource { + for _, ele := range envFroms { + if ele.ConfigMapRef.Name == envFrom.ConfigMapRef.Name { + return envFroms + } + } + return append(envFroms, envFrom) +} + +func AppendVolumeIfMissing(volumes []corev1.Volume, volume corev1.Volume) []corev1.Volume { + for _, v := range volumes { + if v.Name == volume.Name { + return volumes + } + } + return append(volumes, volume) +} + +func AppendVolumeMountIfMissing(volumeMounts []corev1.VolumeMount, volumeMount corev1.VolumeMount) []corev1.VolumeMount { + for _, v := range volumeMounts { + if v.Name == volumeMount.Name { + if v.MountPath == volumeMount.MountPath { + return volumeMounts + } + } + } + return append(volumeMounts, volumeMount) +} + +func AppendVolumeMountWithSubPathIfMissing(volumeMounts []corev1.VolumeMount, volumeMount corev1.VolumeMount) []corev1.VolumeMount { + for _, v := range volumeMounts { + if v.Name == volumeMount.Name { + if v.SubPath == volumeMount.SubPath { + return volumeMounts + } + } + } + return append(volumeMounts, volumeMount) +} + +func AppendContainerIfMissing(containers []corev1.Container, container corev1.Container) []corev1.Container { + for _, c := range containers { + if c.Name == container.Name { + return containers + } + } + return append(containers, container) +} + +func AppendImagePullSecretIfMissing(imagePullSecrets []corev1.LocalObjectReference, imagePullSecret corev1.LocalObjectReference) []corev1.LocalObjectReference { + if imagePullSecret.Name == "" { + return imagePullSecrets + } + for _, i := range imagePullSecrets { + if i.Name == imagePullSecret.Name { + return imagePullSecrets + } + } + return append(imagePullSecrets, imagePullSecret) +} + +func UpdateEnvVar(name string, value string, envs []corev1.EnvVar) 
[]corev1.EnvVar { + newEnvs := []corev1.EnvVar{} + for _, e := range envs { + if e.Name == name { + e.Value = value + } + newEnvs = append(newEnvs, e) + } + return newEnvs +} + +func ValidationChecks(typedata metav1.TypeMeta, metadata metav1.ObjectMeta, expectedKind string, maxNameLength *int) error { + maxlength := maximumCRNameLength + + if maxNameLength != nil { + maxlength = *maxNameLength + } + + if len(metadata.Name) > maxlength { + return fmt.Errorf("The instance name '%s' is too long, the name must be less than or equal to %d characters", metadata.Name, maxlength) + } + + if typedata.Kind != "" { + if typedata.Kind != expectedKind { + return fmt.Errorf("The instance '%s' is of kind %s not an %s kind resource, please check to make sure there are no name collisions across resources", metadata.Name, typedata.Kind, expectedKind) + } + } + + return nil +} + +func SelectRandomValue(values []string) string { + if len(values) == 0 { + return "" + } + randValue, _ := rand.Int(rand.Reader, big.NewInt(int64(len(values)))) + return values[randValue.Int64()] +} + +type Client interface { + Get(ctx context.Context, namespacedName types.NamespacedName, obj k8sclient.Object) error + List(ctx context.Context, list k8sclient.ObjectList, opts ...k8sclient.ListOption) error +} + +func GetZone(client Client) string { + nodeList := &corev1.NodeList{} + err := client.List(context.TODO(), nodeList) + if err != nil { + return "" + } + + zones := []string{} + for _, node := range nodeList.Items { + zone := node.ObjectMeta.Labels["topology.kubernetes.io/zone"] + zones = append(zones, zone) + } + + return SelectRandomValue(zones) +} + +func GetRegion(client Client) string { + nodeList := &corev1.NodeList{} + err := client.List(context.TODO(), nodeList) + if err != nil { + return "" + } + + regions := []string{} + for _, node := range nodeList.Items { + region := node.ObjectMeta.Labels["topology.kubernetes.io/region"] + regions = append(regions, region) + } + + return SelectRandomValue(regions) +} + +func ContainsValue(find string, in []string) bool { + for _, value := range in { + if find == value { + return true + } + } + return false +} + +func ValidateZone(client Client, requestedZone string) error { + nodeList := &corev1.NodeList{} + err := client.List(context.TODO(), nodeList) + if err != nil { + return nil + } + zones := []string{} + for _, node := range nodeList.Items { + zone := node.ObjectMeta.Labels["topology.kubernetes.io/zone"] + zones = append(zones, zone) + zone = node.ObjectMeta.Labels["failure-domain.beta.kubernetes.io/zone"] + zones = append(zones, zone) + zone = node.ObjectMeta.Labels["ibm-cloud.kubernetes.io/zone"] + zones = append(zones, zone) + } + valueFound := ContainsValue(requestedZone, zones) + if !valueFound { + return errors.Errorf("Zone '%s' is not a valid zone", requestedZone) + } + return nil +} + +func ValidateRegion(client Client, requestedRegion string) error { + nodeList := &corev1.NodeList{} + err := client.List(context.TODO(), nodeList) + if err != nil { + return nil + } + regions := []string{} + for _, node := range nodeList.Items { + region := node.ObjectMeta.Labels["topology.kubernetes.io/region"] + regions = append(regions, region) + region = node.ObjectMeta.Labels["failure-domain.beta.kubernetes.io/region"] + regions = append(regions, region) + region = node.ObjectMeta.Labels["ibm-cloud.kubernetes.io/region"] + regions = append(regions, region) + } + valueFound := ContainsValue(requestedRegion, regions) + if !valueFound { + return errors.Errorf("Region '%s' is not a 
valid region", requestedRegion) + } + return nil +} + +func FileExists(path string) bool { + if _, err := os.Stat(path); err == nil { + return true + } + return false +} + +func EnsureDir(dirName string) error { + err := os.MkdirAll(dirName, 0750) + + if err == nil || os.IsExist(err) { + return nil + } else { + return err + } +} + +func GetResourceVerFromSecret(client Client, name, namespace string) (string, error) { + secret := &corev1.Secret{} + err := client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, secret) + if err != nil { + return "", err + } + + resourceVer := secret.ObjectMeta.ResourceVersion + return resourceVer, nil +} + +func JoinMaps(m1, m2 map[string][]byte) map[string][]byte { + joined := map[string][]byte{} + + if m1 != nil { + for k, v := range m1 { + joined[k] = v + } + } + + if m2 != nil { + for k, v := range m2 { + joined[k] = v + } + } + + return joined +} + +func PemStringToBytes(pem string) []byte { + return []byte(pem) +} + +func FileToBytes(file string) ([]byte, error) { + data, err := ioutil.ReadFile(filepath.Clean(file)) + if err != nil { + return nil, errors.Wrapf(err, "failed to read file %s", file) + } + + return data, nil +} + +func Base64ToBytes(base64str string) ([]byte, error) { + data, err := base64.StdEncoding.DecodeString(base64str) + if err != nil { + // If base64 encoded string is padded with too many '=' at the + // end DecodeString will fail with error: "illegal base64 data at input byte ...". + // Need to try stripping of '=' at the one at a time and trying again until no more '=' + // left at that point return err. + + if strings.HasSuffix(base64str, "=") { + base64str = base64str[:len(base64str)-1] + return Base64ToBytes(base64str) + } + return nil, errors.Wrapf(err, "failed to parse base64 string %s", base64str) + } + + return data, nil +} + +func BytesToBase64(b []byte) string { + data := base64.StdEncoding.EncodeToString(b) + + return data +} + +func GetCertificateFromPEMBytes(bytes []byte) (*x509.Certificate, error) { + block, _ := pem.Decode(bytes) + if block == nil { + return nil, errors.New("failed to decode PEM bytes") + } + + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, errors.Wrap(err, "failed to parse certificate") + } + + return cert, nil +} + +func WriteFile(file string, buf []byte, perm os.FileMode) error { + dir := path.Dir(file) + // Create the directory if it doesn't exist + if _, err := os.Stat(dir); os.IsNotExist(err) { + err = os.MkdirAll(dir, 0750) + if err != nil { + return errors.Wrapf(err, "Failed to create directory '%s' for file '%s'", dir, file) + } + } + return ioutil.WriteFile(file, buf, perm) +} + +func CheckIfZoneOrRegionUpdated(oldValue string, newValue string) bool { + if (strings.ToLower(oldValue) != "select" && oldValue != "") && (strings.ToLower(newValue) != "select" && newValue != "") { + if oldValue != newValue { + return true + } + } + + return false +} + +func GenerateRandomString(length int) string { + const charset = "abcdefghijklmnopqrstuvwxyz" + + "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + + b := make([]byte, length) + for i := range b { + num, _ := rand.Int(rand.Reader, big.NewInt(int64(len(charset)))) + b[i] = charset[num.Int64()] + } + return string(b) +} + +func ValidateHSMProxyURL(endpoint string) error { + parsedURL, err := url.Parse(endpoint) + if err != nil { + return err + } + + address := strings.Split(parsedURL.Host, ":") + if len(address) < 2 { + return errors.New("must specify both IP address and port") + } + + if address[0] 
== "" { + return errors.New("missing IP address") + } + + if address[1] == "" { + return errors.New("missing port") + } + + scheme := parsedURL.Scheme + if scheme != "tls" && scheme != "tcp" { + return fmt.Errorf("unsupported scheme '%s', only tcp and tls are supported", scheme) + } + + if !IsTCPReachable(parsedURL.Host) { + return fmt.Errorf("Unable to reach HSM endpoint: %s", parsedURL.Host) + } + return nil +} + +// func HealthCheck(caURL *url.URL, cert []byte) error { +func HealthCheck(healthURL string, cert []byte, timeout time.Duration) error { + rootCertPool := x509.NewCertPool() + rootCertPool.AppendCertsFromPEM(cert) + + transport := http.DefaultTransport + transport.(*http.Transport).TLSClientConfig = &tls.Config{ + RootCAs: rootCertPool, + MinVersion: tls.VersionTLS12, // TLS 1.2 recommended, TLS 1.3 (current latest version) encouraged + } + + client := http.Client{ + Transport: &http.Transport{ + IdleConnTimeout: timeout, + Dial: (&net.Dialer{ + Timeout: timeout, + KeepAlive: timeout, + }).Dial, + TLSHandshakeTimeout: timeout / 2, + TLSClientConfig: &tls.Config{ + RootCAs: rootCertPool, + MinVersion: tls.VersionTLS12, // TLS 1.2 recommended, TLS 1.3 (current latest version) encouraged + }, + }, + } + + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, healthURL, nil) + if err != nil { + return errors.Wrap(err, "invalid http request") + } + + resp, err := client.Do(req) + if err != nil { + return errors.Wrapf(err, "health check request failed") + } + + if resp.StatusCode != http.StatusOK { + return errors.Wrapf(err, "failed health check, ca is not running") + } + + return nil +} + +func IsTCPReachable(url string) bool { + url = strings.Replace(url, "tcp://", "", -1) + url = strings.Replace(url, "tls://", "", -1) + + conn, err := net.Dial("tcp", url) + if err != nil { + return false + } + + defer conn.Close() + + return true +} + +func IntermediateSecretExists(client Client, namespace, secretName string) bool { + err := client.Get(context.TODO(), types.NamespacedName{ + Name: secretName, + Namespace: namespace}, &corev1.Secret{}) + if err != nil { + return false + } + + return true +} + +func IsSecretTLSCert(secretName string) bool { + if strings.HasSuffix(secretName, "-signcert") { + return strings.HasPrefix(secretName, "tls") + } else if strings.HasSuffix(secretName, "-ca-crypto") { + return true + } + + return false +} + +func IsSecretEcert(secretName string) bool { + if strings.HasSuffix(secretName, "-signcert") { + return strings.HasPrefix(secretName, "ecert") + } + + return false +} + +func ConvertSpec(in interface{}, out interface{}) error { + jsonBytes, err := yaml1.Marshal(in) + if err != nil { + return err + } + + err = yaml1.Unmarshal(jsonBytes, out) + if err != nil { + return err + } + return nil +} + +func FindStringInArray(str string, slice []string) bool { + for _, item := range slice { + if item == str { + return true + } + } + return false +} + +func ConvertToJsonMessage(in interface{}) (*json.RawMessage, error) { + bytes, err := json.Marshal(in) + if err != nil { + return nil, err + } + + jm := json.RawMessage(bytes) + return &jm, + + nil +} + +func GetNetworkPolicyFromFile(file string) (*networkingv1.NetworkPolicy, error) { + jsonBytes, err := ConvertYamlFileToJson(file) + if err != nil { + return nil, err + } + + policy := &networkingv1.NetworkPolicy{} + err = json.Unmarshal(jsonBytes, &policy) + if err != nil { + return nil, err + } + + return policy, nil +} + 
+func GetServerVersion() (*version.Info, error) { + config, err := rest.InClusterConfig() + if err != nil { + return nil, errors.Wrap(err, "failed to get cluster config") + } + + clientSet, err := clientset.New(config) + if err != nil { + return nil, errors.Wrap(err, "failed to get client") + } + + version, err := clientSet.DiscoveryClient.ServerVersion() + if err != nil { + return nil, errors.Wrap(err, "failed to get version") + } + return version, nil +} diff --git a/pkg/util/util_suite_test.go b/pkg/util/util_suite_test.go new file mode 100644 index 00000000..71e17126 --- /dev/null +++ b/pkg/util/util_suite_test.go @@ -0,0 +1,46 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package util_test + +import ( + "net" + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestUtil(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Util Suite") +} + +var ( + ln net.Listener +) + +var _ = BeforeSuite(func() { + var err error + ln, err = net.Listen("tcp", "0.0.0.0:2348") + Expect(err).NotTo(HaveOccurred()) +}) + +var _ = AfterSuite(func() { + ln.Close() +}) diff --git a/pkg/util/util_test.go b/pkg/util/util_test.go new file mode 100644 index 00000000..eb1ffcb8 --- /dev/null +++ b/pkg/util/util_test.go @@ -0,0 +1,399 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package util_test + +import ( + "errors" + + "github.com/IBM-Blockchain/fabric-operator/pkg/util" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var _ = Describe("Util", func() { + + Context("Convert yaml file to json", func() { + It("returns an error if file does not exist", func() { + _, err := util.ConvertYamlFileToJson("fake.yaml") + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("no such file")) + }) + + It("returns an error if yaml file is not properly formatted", func() { + _, err := util.ConvertYamlFileToJson("testdata/bad.yaml") + Expect(err).To(HaveOccurred()) + }) + + It("return a byte arrary if the file exists and is a valid yaml file", func() { + bytes, err := util.ConvertYamlFileToJson("../../definitions/peer/pvc.yaml") + Expect(err).NotTo(HaveOccurred()) + Expect(len(bytes)).NotTo(Equal(0)) + }) + }) + + Context("GetPVCFromFile", func() { + It("returns an error if config is incorrectly defined", func() { + _, err := util.GetPVCFromFile("testdata/invalid_kind.yaml") + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("cannot unmarshal")) + }) + + It("reads file with PVC configuration and unmarshals into a struct", func() { + pvc, err := util.GetPVCFromFile("../../definitions/peer/pvc.yaml") + Expect(err).NotTo(HaveOccurred()) + Expect(pvc).NotTo(BeNil()) + }) + }) + + Context("GetDeploymentFromFile", func() { + It("returns an error if config is incorrectly defined", func() { + _, err := util.GetDeploymentFromFile("testdata/invalid_kind.yaml") + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("cannot unmarshal")) + }) + + It("reads file with Deployment configuration and unmarshals into a struct", func() { + dep, err := util.GetDeploymentFromFile("../../definitions/peer/deployment.yaml") + Expect(err).NotTo(HaveOccurred()) + Expect(dep).NotTo(BeNil()) + }) + }) + + Context("GetServiceFromFile", func() { + It("returns an error if config is incorrectly defined", func() { + _, err := util.GetServiceFromFile("testdata/invalid_kind.yaml") + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("cannot unmarshal")) + }) + + It("reads file with Service configuration and unmarshals into a struct", func() { + srvc, err := util.GetServiceFromFile("../../definitions/peer/service.yaml") + Expect(err).NotTo(HaveOccurred()) + Expect(srvc).NotTo(BeNil()) + }) + }) + + Context("GetSecretFromFile", func() { + It("returns an error if config is incorrectly defined", func() { + _, err := util.GetSecretFromFile("testdata/invalid_kind.yaml") + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("cannot unmarshal")) + }) + It("reads file with Service configuration and unmarshals into a struct", func() { + srvc, err := util.GetSecretFromFile("../../testdata/secret.yaml") + Expect(err).NotTo(HaveOccurred()) + Expect(srvc).NotTo(BeNil()) + }) + }) + + Context("GetIngressFromFile", func() { + It("returns an error if config is incorrectly defined", func() { + _, err := util.GetIngressFromFile("testdata/invalid_kind.yaml") + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("cannot unmarshal")) + }) + }) + + Context("GetIngressv1beta1FromFile", func() { + It("returns an error if config is incorrectly defined", func() { + _, err := util.GetIngressv1beta1FromFile("testdata/invalid_kind.yaml") + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("cannot unmarshal")) + }) + }) + + Context("GetRoleFromFile", func() { + It("returns an error if config 
is incorrectly defined", func() {
+			_, err := util.GetRoleFromFile("testdata/invalid_kind.yaml")
+			Expect(err).To(HaveOccurred())
+			Expect(err.Error()).To(ContainSubstring("cannot unmarshal"))
+		})
+
+		It("reads file with Role configuration and unmarshals into a struct", func() {
+			srvc, err := util.GetRoleFromFile("../../definitions/peer/role.yaml")
+			Expect(err).NotTo(HaveOccurred())
+			Expect(srvc).NotTo(BeNil())
+		})
+	})
+
+	Context("GetRoleBindingFromFile", func() {
+		It("returns an error if config is incorrectly defined", func() {
+			_, err := util.GetRoleBindingFromFile("testdata/invalid_kind.yaml")
+			Expect(err).To(HaveOccurred())
+			Expect(err.Error()).To(ContainSubstring("cannot unmarshal"))
+		})
+
+		It("reads file with RoleBinding configuration and unmarshals into a struct", func() {
+			srvc, err := util.GetRoleBindingFromFile("../../definitions/peer/rolebinding.yaml")
+			Expect(err).NotTo(HaveOccurred())
+			Expect(srvc).NotTo(BeNil())
+		})
+	})
+
+	Context("GetServiceAccountFromFile", func() {
+		It("returns an error if config is incorrectly defined", func() {
+			_, err := util.GetServiceAccountFromFile("testdata/invalid_kind.yaml")
+			Expect(err).To(HaveOccurred())
+			Expect(err.Error()).To(ContainSubstring("cannot unmarshal"))
+		})
+
+		It("reads file with SA configuration and unmarshals into a struct", func() {
+			srvc, err := util.GetServiceAccountFromFile("../../definitions/peer/serviceaccount.yaml")
+			Expect(err).NotTo(HaveOccurred())
+			Expect(srvc).NotTo(BeNil())
+		})
+	})
+
+	Context("GetCRDFromFile", func() {
+		It("returns an error if config is incorrectly defined", func() {
+			_, err := util.GetCRDFromFile("testdata/invalid_kind.yaml")
+			Expect(err).To(HaveOccurred())
+			Expect(err.Error()).To(ContainSubstring("cannot unmarshal"))
+		})
+
+		It("reads file with CRD configuration and unmarshals into a struct", func() {
+			srvc, err := util.GetCRDFromFile("../../config/crd/bases/ibp.com_ibpcas.yaml")
+			Expect(err).NotTo(HaveOccurred())
+			Expect(srvc).NotTo(BeNil())
+		})
+	})
+
+	Context("GetResourcePatch", func() {
+		It("returns a resource struct that retains values that have not been modified and contains the new values for fields that were updated", func() {
+
+			resourceList := make(corev1.ResourceList)
+			resourceList[corev1.ResourceCPU] = resource.MustParse("0.5")
+			resourceList[corev1.ResourceMemory] = resource.MustParse("5Gi")
+			resourceList[corev1.ResourceEphemeralStorage] = resource.MustParse("1Gi")
+
+			current := &corev1.ResourceRequirements{
+				Requests: resourceList,
+			}
+
+			resourceList[corev1.ResourceCPU] = resource.MustParse("0.7")
+			new := &corev1.ResourceRequirements{
+				Requests: resourceList,
+			}
+
+			patched, err := util.GetResourcePatch(current, new)
+			Expect(err).NotTo(HaveOccurred())
+
+			cpu := patched.Requests[corev1.ResourceCPU]
+			Expect(cpu.String()).To(Equal("700m"))
+			memory := patched.Requests[corev1.ResourceMemory]
+			Expect(memory.String()).To(Equal("5Gi"))
+			ephemeralStorage := patched.Requests[corev1.ResourceEphemeralStorage]
+			Expect(ephemeralStorage.String()).To(Equal("1Gi"))
+		})
+	})
+
+	Context("already exists error", func() {
+		It("returns error if it is not an already exists error", func() {
+			err := util.IgnoreAlreadyExistError(errors.New("failed to create resource"))
+			Expect(err).To(HaveOccurred())
+			Expect(err.Error()).To(Equal("failed to create resource"))
+		})
+
+		It("does not return error if it is an already exists error", func() {
+			err := util.IgnoreAlreadyExistError(errors.New("resource already exists"))
+			Expect(err).NotTo(HaveOccurred())
+		})
+	})
+
+	
Context("update existing env var", func() { + var envs []corev1.EnvVar + + BeforeEach(func() { + env := corev1.EnvVar{ + Name: "GENERATE_GENESIS", + Value: "false", + } + envs = append(envs, env) + }) + + It("updates env var if found in slice", func() { + newEnvs := util.UpdateEnvVar("GENERATE_GENESIS", "true", envs) + Expect(newEnvs[0].Value).To(Equal("true")) + }) + }) + + Context("env exists", func() { + var envs []corev1.EnvVar + + BeforeEach(func() { + env := corev1.EnvVar{ + Name: "GENERATE_GENESIS", + Value: "false", + } + envs = append(envs, env) + + env = corev1.EnvVar{ + Name: "TEST_NAME", + Value: "false", + } + envs = append(envs, env) + }) + + It("returns true if found in slice", func() { + exists := util.EnvExists(envs, "TEST_NAME") + Expect(exists).To(Equal(true)) + }) + + It("returns false if not found in slice", func() { + exists := util.EnvExists(envs, "FAKE_NAME") + Expect(exists).To(Equal(false)) + }) + }) + + Context("replaces (updates) env if diff", func() { + var envs []corev1.EnvVar + + BeforeEach(func() { + env := corev1.EnvVar{ + Name: "GENERATE_GENESIS", + Value: "false", + } + envs = append(envs, env) + + env = corev1.EnvVar{ + Name: "TEST_NAME", + Value: "false", + } + envs = append(envs, env) + }) + + It("returns env with updated replaced value", func() { + key := "TEST_NAME" + replace := "true" + newEnvs, _ := util.ReplaceEnvIfDiff(envs, key, replace) + Expect(newEnvs[1].Value).To(Equal("true")) + }) + }) + + Context("Resource Validation", func() { + It("returns an error if controller handling the request is reconciling a resource of different type", func() { + typemeta := metav1.TypeMeta{ + Kind: "NOTIBPCA", + } + maxNameLength := 50 + err := util.ValidationChecks(typemeta, metav1.ObjectMeta{}, "IBPCA", &maxNameLength) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("not an IBPCA kind resource, please check to make sure there are no name collisions across resources")) + }) + + It("returns an error if the instance name is greater than maxNameLength", func() { + typemeta := metav1.TypeMeta{ + Kind: "IBPCA", + } + objectmeta := metav1.ObjectMeta{ + Name: "012345678901234567890123456789", + } + maxNameLength := 25 + err := util.ValidationChecks(typemeta, objectmeta, "IBPCA", &maxNameLength) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("is too long, the name must be less than or equal to ")) + }) + + It("returns an error if the instance name is greater than default name length", func() { + typemeta := metav1.TypeMeta{ + Kind: "IBPCA", + } + objectmeta := metav1.ObjectMeta{ + Name: "0123456789012345678901234567890123", + } + err := util.ValidationChecks(typemeta, objectmeta, "IBPCA", nil) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("is too long, the name must be less than or equal to")) + }) + }) + + Context("HSM proxy endpoint validation", func() { + It("returns no error for a valid endpoint", func() { + err := util.ValidateHSMProxyURL("tcp://0.0.0.0:2348") + Expect(err).NotTo(HaveOccurred()) + }) + + It("returns no error for a valid TLS endpoint", func() { + err := util.ValidateHSMProxyURL("tls://0.0.0.0:2348") + Expect(err).NotTo(HaveOccurred()) + }) + + It("returns an error for incomplete endpoint", func() { + err := util.ValidateHSMProxyURL("tcp://0.0.0.0") + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("must specify both IP address and port")) + }) + + It("returns an error for missing port", func() { + err := util.ValidateHSMProxyURL("tcp://0.0.0.0:") + 
Expect(err).To(HaveOccurred())
+			Expect(err.Error()).To(Equal("missing port"))
+		})
+
+		It("returns an error for missing IP address", func() {
+			err := util.ValidateHSMProxyURL("tcp://:2348")
+			Expect(err).To(HaveOccurred())
+			Expect(err.Error()).To(Equal("missing IP address"))
+		})
+
+		It("returns an error for invalid scheme", func() {
+			err := util.ValidateHSMProxyURL("http://0.0.0.0:8888")
+			Expect(err).To(HaveOccurred())
+			Expect(err.Error()).To(Equal("unsupported scheme 'http', only tcp and tls are supported"))
+		})
+	})
+
+	Context("append image pull secret if missing", func() {
+		var (
+			pullSecrets []corev1.LocalObjectReference
+		)
+
+		BeforeEach(func() {
+			pullSecrets = []corev1.LocalObjectReference{
+				corev1.LocalObjectReference{
+					Name: "pullsecret1",
+				},
+			}
+		})
+
+		It("appends new image pull secret", func() {
+			new := corev1.LocalObjectReference{Name: "pullsecret2"}
+			pullSecrets := util.AppendImagePullSecretIfMissing(pullSecrets, new)
+			Expect(len(pullSecrets)).To(Equal(2))
+			Expect(pullSecrets[1]).To(Equal(new))
+		})
+
+		It("does not append existing image pull secret", func() {
+			new := corev1.LocalObjectReference{Name: "pullsecret1"}
+			pullSecrets := util.AppendImagePullSecretIfMissing(pullSecrets, new)
+			Expect(len(pullSecrets)).To(Equal(1))
+			Expect(pullSecrets[0].Name).To(Equal("pullsecret1"))
+		})
+
+		It("does not append blank image pull secret", func() {
+			new := corev1.LocalObjectReference{}
+			pullSecrets := util.AppendImagePullSecretIfMissing(pullSecrets, new)
+			Expect(len(pullSecrets)).To(Equal(1))
+			Expect(pullSecrets[0].Name).To(Equal("pullsecret1"))
+		})
+	})
+})
diff --git a/sample-network/.gitignore b/sample-network/.gitignore
new file mode 100644
index 00000000..0bb22341
--- /dev/null
+++ b/sample-network/.gitignore
@@ -0,0 +1,4 @@
+network-debug.log
+network.log
+temp/
+config/configtx.yaml
\ No newline at end of file
diff --git a/sample-network/README.md b/sample-network/README.md
new file mode 100644
index 00000000..4e7ee6f3
--- /dev/null
+++ b/sample-network/README.md
@@ -0,0 +1,207 @@
+# Sample Network
+
+This project uses the operator to launch a Fabric network on a local KIND or k3s cluster.
+
+- Apply `kustomization` overlays to install the Operator
+- Apply `kustomization` overlays to construct a Fabric Network
+- Call `peer` CLI and channel participation SDKs to administer the network
+- Deploy _Chaincode-as-a-Service_ smart contracts
+- Develop _Gateway Client_ applications on a local workstation
+
+Feedback, comments, questions, etc. at Discord: [#fabric-kubernetes](https://discord.gg/hyperledger)
+
+![sample-network](../docs/images/fabric-operator-sample-network.png)
+
+## Prerequisites:
+
+- [kubectl](https://kubernetes.io/docs/tasks/tools/)
+- [jq](https://stedolan.github.io/jq/)
+- [envsubst](https://www.gnu.org/software/gettext/manual/html_node/envsubst-Invocation.html) (`brew install gettext` on OSX)
+
+- K8s - either:
+  - [KIND](https://kind.sigs.k8s.io/docs/user/quick-start/#installation) + [Docker](https://www.docker.com) (resources: 8 CPU / 8 GB RAM)
+  - [Rancher Desktop](https://rancherdesktop.io) (resources: 8 CPU / 8 GB RAM, mobyd, and disable Traefik)
+
+
+### Ingress and DNS
+
+Networks created with the operator include Ingress / Route resources to expose services at a common,
+virtual DNS domain (e.g. `*.my-blockchain.example.com`). For local development, the cluster includes an Nginx ingress
+controller configured for TLS traffic in [SSL Passthrough](https://kubernetes.github.io/ingress-nginx/user-guide/tls/#ssl-passthrough)
+mode.
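+
+Because the ingress controller runs in SSL Passthrough mode, TLS is not terminated at the ingress itself: connections
+are routed to the individual CA, peer, and orderer services by SNI host name, and the Fabric nodes present their own
+certificates. Once the network is up, this can be observed directly (an optional sanity check; it relies on the
+`TEST_NETWORK_DOMAIN` variable defined below and a peer host name used later in this guide):
+```shell
+openssl s_client \
+  -connect test-network-org1-peer1-peer.${TEST_NETWORK_DOMAIN}:443 \
+  -servername test-network-org1-peer1-peer.${TEST_NETWORK_DOMAIN} </dev/null 2>/dev/null \
+  | grep -E 'subject=|issuer='
+```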
+
+Before installing the network, you must determine an IP address for your system which is visible _both_ to pods running in
+Kubernetes _AND_ to the host OS. In conjunction with the [Dead simple wildcard DNS for any IP Address](https://nip.io)
+resolver, the cluster IP can be used to route traffic for a virtual DNS domain to pods running in Kubernetes.
+For example, if the ingress is reachable at 9.160.3.138:443, Fabric services will be exposed at the DNS wildcard domain
+`*.9-160-3-138.nip.io`.
+
+- On machines connected to the IBM network, use the "9.x.y.z" tunnel address assigned by the VPN
+- On machines running Rancher, use the IP address assigned by DHCP (e.g. 192.168.0.11)
+- On machines running an embedded VM (WSL, virtualbox, VMWare, etc.), use the IP address of the bridge interface for the guest VM.
+- TODO: what about non-VPN resolvers?
+
+E.g., use the BlueZone 9.x address when connected to the IBM VPN:
+```shell
+export TEST_NETWORK_IPADDR=$(ifconfig -a | grep "inet " | awk '{print $2}' | grep ^9\.)
+
+export TEST_NETWORK_DOMAIN=$(echo $TEST_NETWORK_IPADDR | tr -s '.' '-').nip.io
+```
+
+
+
+### Fabric Binaries
+
+Fabric binaries (peer, osnadmin, etc.) will be installed into the local `bin` folder. Add these to your PATH:
+
+```shell
+export PATH=${PWD}:${PWD}/bin:$PATH
+```
+
+On OSX, there is a bug in the Golang DNS resolver, causing the Fabric binaries to stall out when resolving DNS.
+See [Fabric #3372](https://github.com/hyperledger/fabric/issues/3372) and [Golang #43398](https://github.com/golang/go/issues/43398).
+Fix this by running a local build of [fabric](https://github.com/hyperledger/fabric) binaries and copying the build outputs
+from `fabric/build/bin/*` --> `sample-network/bin`.
+
+
+## Test Network
+
+Create a Kubernetes cluster, Nginx ingress, and Fabric CRDs:
+```shell
+network kind
+network cluster init
+```
+
+Launch the operator and `kustomize` a network of [CAs](config/cas), [peers](config/peers), and [orderers](config/orderers):
+```shell
+network up
+```
+
+Explore Kubernetes `Pods`, `Deployments`, `Services`, `Ingress`, etc.:
+```shell
+kubectl -n test-network get all
+```
+
+## Chaincode
+
+The operator is compatible with sample _Chaincode-as-a-Service_ smart contracts.
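+
+Before cloning the samples and deploying a contract, it can help to confirm that all of the test network pods have
+reached the Ready state (an optional check, not part of the deployment flow itself):
+```shell
+kubectl -n test-network wait --for=condition=Ready pods --all --timeout=5m
+```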
+
+Clone the [fabric-samples](https://github.com/hyperledger/fabric-samples) git repository:
+```shell
+git clone git@github.com:hyperledger/fabric-samples.git /tmp/fabric-samples
+```
+
+Create a channel:
+```shell
+network channel create
+```
+
+Deploy a sample contract:
+```shell
+network cc deploy asset-transfer-basic basic_1.0 /tmp/fabric-samples/asset-transfer-basic/chaincode-java
+
+network cc metadata asset-transfer-basic
+network cc invoke asset-transfer-basic '{"Args":["InitLedger"]}'
+network cc query asset-transfer-basic '{"Args":["ReadAsset","asset1"]}' | jq
+```
+
+Or set the `peer` CLI context to org1 peer1:
+```shell
+export FABRIC_CFG_PATH=${PWD}/temp/config
+export CORE_PEER_LOCALMSPID=Org1MSP
+export CORE_PEER_ADDRESS=test-network-org1-peer1-peer.${TEST_NETWORK_DOMAIN}:443
+export CORE_PEER_TLS_ENABLED=true
+export CORE_PEER_MSPCONFIGPATH=${PWD}/temp/enrollments/org1/users/org1admin/msp
+export CORE_PEER_TLS_ROOTCERT_FILE=${PWD}/temp/channel-msp/peerOrganizations/org1/msp/tlscacerts/tlsca-signcert.pem
+```
+
+and directly interact with the contract:
+```shell
+peer chaincode query -n asset-transfer-basic -C mychannel -c '{"Args":["org.hyperledger.fabric:GetMetadata"]}'
+```
+
+## K8s Chaincode Builder
+
+The operator can also be configured for use with the [fabric-builder-k8s](https://github.com/hyperledgendary/fabric-builder-k8s)
+chaincode builder, providing smooth and immediate _Chaincode Right Now!_ deployments.
+
+Reconstruct the network with the "k8s-fabric-peer" image:
+```shell
+network down
+
+export TEST_NETWORK_PEER_IMAGE=ghcr.io/hyperledgendary/k8s-fabric-peer
+export TEST_NETWORK_PEER_IMAGE_LABEL=v0.5.0
+
+network up
+network channel create
+```
+
+Download a "k8s" chaincode package:
+```shell
+curl -fsSL https://github.com/hyperledgendary/conga-nft-contract/releases/download/v0.1.1/conga-nft-contract-v0.1.1.tgz -o conga-nft-contract-v0.1.1.tgz
+```
+
+Install the smart contract:
+```shell
+peer lifecycle chaincode install conga-nft-contract-v0.1.1.tgz
+
+export PACKAGE_ID=$(peer lifecycle chaincode calculatepackageid conga-nft-contract-v0.1.1.tgz) && echo $PACKAGE_ID
+
+peer lifecycle \
+  chaincode approveformyorg \
+  --channelID     mychannel \
+  --name          conga-nft-contract \
+  --version       1 \
+  --package-id    ${PACKAGE_ID} \
+  --sequence      1 \
+  --orderer       test-network-org0-orderersnode1-orderer.${TEST_NETWORK_DOMAIN}:443 \
+  --tls --cafile  $PWD/temp/channel-msp/ordererOrganizations/org0/orderers/org0-orderersnode1/tls/signcerts/tls-cert.pem
+
+peer lifecycle \
+  chaincode commit \
+  --channelID     mychannel \
+  --name          conga-nft-contract \
+  --version       1 \
+  --sequence      1 \
+  --orderer       test-network-org0-orderersnode1-orderer.${TEST_NETWORK_DOMAIN}:443 \
+  --tls --cafile  $PWD/temp/channel-msp/ordererOrganizations/org0/orderers/org0-orderersnode1/tls/signcerts/tls-cert.pem
+
+```
+
+Inspect chaincode pods:
+```shell
+kubectl -n test-network describe pods -l app.kubernetes.io/created-by=fabric-builder-k8s
+```
+
+Query the smart contract:
+```shell
+peer chaincode query -n conga-nft-contract -C mychannel -c '{"Args":["org.hyperledger.fabric:GetMetadata"]}'
+```
+
+
+## Teardown
+
+Invariably, something in the recipe above will go awry. 
Look for additional diagnostics in network-debug.log and +reset the stage with: + +```shell +network down +``` +or +```shell +network unkind +``` + + + +## Appendix: Operations Console + +Launch the [Fabric Operations Console](https://github.com/hyperledger-labs/fabric-operations-console): +```shell +network console +``` + +- open `https://test-network-hlf-console-console.${TEST_NETWORK_DOMAIN}` +- Accept the self-signed TLS certificate +- Log in as `admin:password` +- [Build a network](https://cloud.ibm.com/docs/blockchain?topic=blockchain-ibp-console-build-network) diff --git a/sample-network/config/cas/kustomization.yaml b/sample-network/config/cas/kustomization.yaml new file mode 100644 index 00000000..0d354cf4 --- /dev/null +++ b/sample-network/config/cas/kustomization.yaml @@ -0,0 +1,25 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - org0-ca.yaml + - org1-ca.yaml + - org2-ca.yaml diff --git a/sample-network/config/cas/org0-ca.yaml b/sample-network/config/cas/org0-ca.yaml new file mode 100644 index 00000000..41001f64 --- /dev/null +++ b/sample-network/config/cas/org0-ca.yaml @@ -0,0 +1,135 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +apiVersion: ibp.com/v1beta1 +kind: IBPCA +metadata: + name: org0-ca +spec: + action: + renew: {} + configoverride: + ca: + affiliations: + org1: + - department1 + - department2 + org2: + - department1 + registry: + identities: + - name: admin + pass: adminpw + type: client + attrs: + hf.Registrar.Roles: "*" + hf.Registrar.DelegateRoles: "*" + hf.Revoker: true + hf.IntermediateCA: true + hf.GenCRL: true + hf.Registrar.Attributes: "*" + hf.AffiliationMgr: true + - name: orderer1 + pass: orderer1pw + type: orderer + - name: orderer2 + pass: orderer2pw + type: orderer + - name: orderer3 + pass: orderer3pw + type: orderer + - name: org0admin + pass: org0adminpw + type: admin + debug: true + signing: + default: + expiry: 87600h0m0s + tlsca: + affiliations: + org1: + - department1 + - department2 + org0: + - department1 + registry: + identities: + - name: admin + pass: adminpw + type: client # todo: shouldn't this be an admin? 
+ attrs: + hf.Registrar.Roles: "*" + hf.Registrar.DelegateRoles: "*" + hf.Revoker: true + hf.IntermediateCA: true + hf.GenCRL: true + hf.Registrar.Attributes: "*" + hf.AffiliationMgr: true + - name: orderer1 + pass: orderer1pw + type: orderer + - name: orderer2 + pass: orderer2pw + type: orderer + - name: orderer3 + pass: orderer3pw + type: orderer + - name: org0admin + pass: org0adminpw + type: admin + + debug: true + signing: + default: + expiry: 87600h0m0s + customNames: + pvc: {} + domain: "${DOMAIN}" + images: + caImage: ${CA_IMAGE} + caTag: ${CA_IMAGE_LABEL} + caInitImage: ${INIT_IMAGE} + caInitTag: ${INIT_IMAGE_LABEL} + ingress: + class: "" + tlsSecretName: "" + license: + accept: true + replicas: 1 + resources: + ca: + limits: + cpu: 100m + memory: 200M + requests: + cpu: 10m + memory: 10M + init: + limits: + cpu: 100m + memory: 200M + requests: + cpu: 10m + memory: 10M + service: + type: ClusterIP + storage: + ca: + class: "${STORAGE_CLASS}" + size: 100M + version: ${FABRIC_CA_VERSION} diff --git a/sample-network/config/cas/org1-ca.yaml b/sample-network/config/cas/org1-ca.yaml new file mode 100644 index 00000000..03257861 --- /dev/null +++ b/sample-network/config/cas/org1-ca.yaml @@ -0,0 +1,128 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +apiVersion: ibp.com/v1beta1 +kind: IBPCA +metadata: + name: org1-ca +spec: + action: + renew: {} + configoverride: + ca: + affiliations: + org1: + - department1 + - department2 + org2: + - department1 + registry: + identities: + - name: admin + pass: adminpw + type: client + attrs: + hf.Registrar.Roles: "*" + hf.Registrar.DelegateRoles: "*" + hf.Revoker: true + hf.IntermediateCA: true + hf.GenCRL: true + hf.Registrar.Attributes: "*" + hf.AffiliationMgr: true + - name: peer1 + pass: peer1pw + type: peer + - name: peer2 + pass: peer2pw + type: peer + - name: org1admin + pass: org1adminpw + type: admin + debug: true + signing: + default: + expiry: 87600h0m0s + tlsca: + affiliations: + org1: + - department1 + - department2 + org2: + - department1 + registry: + identities: + - name: admin + pass: adminpw + type: client # todo: shouldn't this be an admin? 
+ attrs: + hf.Registrar.Roles: "*" + hf.Registrar.DelegateRoles: "*" + hf.Revoker: true + hf.IntermediateCA: true + hf.GenCRL: true + hf.Registrar.Attributes: "*" + hf.AffiliationMgr: true + - name: peer1 + pass: peer1pw + type: peer + - name: peer2 + pass: peer2pw + type: peer + - name: org1admin + pass: org1adminpw + type: admin + debug: true + signing: + default: + expiry: 87600h0m0s + customNames: + pvc: {} + domain: "${DOMAIN}" + images: + caImage: ${CA_IMAGE} + caTag: ${CA_IMAGE_LABEL} + caInitImage: ${INIT_IMAGE} + caInitTag: ${INIT_IMAGE_LABEL} + ingress: + class: "" + tlsSecretName: "" + license: + accept: true + replicas: 1 + resources: + ca: + limits: + cpu: 100m + memory: 200M + requests: + cpu: 10m + memory: 10M + init: + limits: + cpu: 100m + memory: 200M + requests: + cpu: 10m + memory: 10M + service: + type: ClusterIP + storage: + ca: + class: "${STORAGE_CLASS}" + size: 100M + version: ${FABRIC_CA_VERSION} diff --git a/sample-network/config/cas/org2-ca.yaml b/sample-network/config/cas/org2-ca.yaml new file mode 100644 index 00000000..27c57c9f --- /dev/null +++ b/sample-network/config/cas/org2-ca.yaml @@ -0,0 +1,130 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +apiVersion: ibp.com/v1beta1 +kind: IBPCA +metadata: + name: org2-ca +spec: + action: + renew: {} + configoverride: + ca: + affiliations: + org1: + - department1 + - department2 + org2: + - department1 + registry: + identities: + - name: admin + pass: adminpw + type: client + attrs: + hf.Registrar.Roles: "*" + hf.Registrar.DelegateRoles: "*" + hf.Revoker: true + hf.IntermediateCA: true + hf.GenCRL: true + hf.Registrar.Attributes: "*" + hf.AffiliationMgr: true + - name: peer1 + pass: peer1pw + type: peer + - name: peer2 + pass: peer2pw + type: peer + - name: org2admin + pass: org2adminpw + type: admin + debug: true + signing: + default: + expiry: 87600h0m0s + tlsca: + affiliations: + org1: + - department1 + - department2 + org2: + - department1 + registry: + identities: + - name: admin + pass: adminpw + type: client # todo: shouldn't this be an admin? 
+ attrs: + hf.Registrar.Roles: "*" + hf.Registrar.DelegateRoles: "*" + hf.Revoker: true + hf.IntermediateCA: true + hf.GenCRL: true + hf.Registrar.Attributes: "*" + hf.AffiliationMgr: true + - name: peer1 + pass: peer1pw + type: peer + - name: peer2 + pass: peer2pw + type: peer + - name: org2admin + pass: org2adminpw + type: admin + debug: true + signing: + default: + expiry: 87600h0m0s + customNames: + pvc: {} + domain: "${DOMAIN}" + # imagePullSecrets: + # - regcred + images: + caImage: ${CA_IMAGE} + caTag: ${CA_IMAGE_LABEL} + caInitImage: ${INIT_IMAGE} + caInitTag: ${INIT_IMAGE_LABEL} + ingress: + class: "" + tlsSecretName: "" + license: + accept: true + replicas: 1 + resources: + ca: + limits: + cpu: 100m + memory: 200M + requests: + cpu: 10m + memory: 10M + init: + limits: + cpu: 100m + memory: 200M + requests: + cpu: 10m + memory: 10M + service: + type: ClusterIP + storage: + ca: + class: "${STORAGE_CLASS}" + size: 100M + version: ${FABRIC_CA_VERSION} diff --git a/sample-network/config/configtx-template.yaml b/sample-network/config/configtx-template.yaml new file mode 100644 index 00000000..ccdcd937 --- /dev/null +++ b/sample-network/config/configtx-template.yaml @@ -0,0 +1,428 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +--- +################################################################################ +# +# Section: Organizations +# +# - This section defines the different organizational identities which will +# be referenced later in the configuration. +# +################################################################################ +Organizations: + + # SampleOrg defines an MSP using the sampleconfig. 
It should never be used + # in production but may be used as a template for other definitions + - &OrdererOrg + # DefaultOrg defines the organization which is used in the sampleconfig + # of the fabric.git development environment + Name: OrdererOrg + + # ID to load the MSP definition as + ID: OrdererMSP + + # MSPDir is the filesystem path which contains the MSP configuration + MSPDir: ../../temp/channel-msp/ordererOrganizations/org0/msp + + # Policies defines the set of policies at this level of the config tree + # For organization policies, their canonical path is usually + # /Channel/// + Policies: + Readers: + Type: Signature + Rule: "OR('OrdererMSP.member')" + Writers: + Type: Signature + Rule: "OR('OrdererMSP.member')" + Admins: + Type: Signature + Rule: "OR('OrdererMSP.admin')" + + OrdererEndpoints: + - org0-orderersnode1.${KUBE_DNS_DOMAIN}:7050 + - org0-orderersnode2.${KUBE_DNS_DOMAIN}:7050 + - org0-orderersnode3.${KUBE_DNS_DOMAIN}:7050 + + - &Org1 + # DefaultOrg defines the organization which is used in the sampleconfig + # of the fabric.git development environment + Name: Org1MSP + + # ID to load the MSP definition as + ID: Org1MSP + + MSPDir: ../../temp/channel-msp/peerOrganizations/org1/msp + + # Policies defines the set of policies at this level of the config tree + # For organization policies, their canonical path is usually + # /Channel/// + Policies: + Readers: + Type: Signature + Rule: "OR('Org1MSP.admin', 'Org1MSP.peer', 'Org1MSP.client')" + Writers: + Type: Signature + Rule: "OR('Org1MSP.admin', 'Org1MSP.client')" + Admins: + Type: Signature + Rule: "OR('Org1MSP.admin')" + Endorsement: + Type: Signature + Rule: "OR('Org1MSP.peer')" + + # leave this flag set to true. + AnchorPeers: + # AnchorPeers defines the location of peers which can be used + # for cross org gossip communication. Note, this value is only + # encoded in the genesis block in the Application section context + - Host: org1-peer1.${KUBE_DNS_DOMAIN} + Port: 7051 + + - &Org2 + # DefaultOrg defines the organization which is used in the sampleconfig + # of the fabric.git development environment + Name: Org2MSP + + # ID to load the MSP definition as + ID: Org2MSP + + MSPDir: ../../temp/channel-msp/peerOrganizations/org2/msp + + # Policies defines the set of policies at this level of the config tree + # For organization policies, their canonical path is usually + # /Channel/// + Policies: + Readers: + Type: Signature + Rule: "OR('Org2MSP.admin', 'Org2MSP.peer', 'Org2MSP.client')" + Writers: + Type: Signature + Rule: "OR('Org2MSP.admin', 'Org2MSP.client')" + Admins: + Type: Signature + Rule: "OR('Org2MSP.admin')" + Endorsement: + Type: Signature + Rule: "OR('Org2MSP.peer')" + + AnchorPeers: + # AnchorPeers defines the location of peers which can be used + # for cross org gossip communication. Note, this value is only + # encoded in the genesis block in the Application section context + - Host: org2-peer1.${KUBE_DNS_DOMAIN} + Port: 7051 + +################################################################################ +# +# SECTION: Capabilities +# +# - This section defines the capabilities of fabric network. This is a new +# concept as of v1.1.0 and should not be utilized in mixed networks with +# v1.0.x peers and orderers. Capabilities define features which must be +# present in a fabric binary for that binary to safely participate in the +# fabric network. 
For instance, if a new MSP type is added, newer binaries +# might recognize and validate the signatures from this type, while older +# binaries without this support would be unable to validate those +# transactions. This could lead to different versions of the fabric binaries +# having different world states. Instead, defining a capability for a channel +# informs those binaries without this capability that they must cease +# processing transactions until they have been upgraded. For v1.0.x if any +# capabilities are defined (including a map with all capabilities turned off) +# then the v1.0.x peer will deliberately crash. +# +################################################################################ +Capabilities: + # Channel capabilities apply to both the orderers and the peers and must be + # supported by both. + # Set the value of the capability to true to require it. + Channel: &ChannelCapabilities + # V2_0 capability ensures that orderers and peers behave according + # to v2.0 channel capabilities. Orderers and peers from + # prior releases would behave in an incompatible way, and are therefore + # not able to participate in channels at v2.0 capability. + # Prior to enabling V2.0 channel capabilities, ensure that all + # orderers and peers on a channel are at v2.0.0 or later. + V2_0: true + + # Orderer capabilities apply only to the orderers, and may be safely + # used with prior release peers. + # Set the value of the capability to true to require it. + Orderer: &OrdererCapabilities + # V2_0 orderer capability ensures that orderers behave according + # to v2.0 orderer capabilities. Orderers from + # prior releases would behave in an incompatible way, and are therefore + # not able to participate in channels at v2.0 orderer capability. + # Prior to enabling V2.0 orderer capabilities, ensure that all + # orderers on channel are at v2.0.0 or later. + V2_0: true + + # Application capabilities apply only to the peer network, and may be safely + # used with prior release orderers. + # Set the value of the capability to true to require it. + Application: &ApplicationCapabilities + # V2_0 application capability ensures that peers behave according + # to v2.0 application capabilities. Peers from + # prior releases would behave in an incompatible way, and are therefore + # not able to participate in channels at v2.0 application capability. + # Prior to enabling V2.0 application capabilities, ensure that all + # peers on channel are at v2.0.0 or later. 
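+    # Note (editor, for readers of this template): the &ChannelCapabilities,
+    # &OrdererCapabilities and &ApplicationCapabilities anchors defined in this
+    # section are pulled into the channel profiles at the bottom of this file
+    # through YAML merge keys / aliases (<<: *ApplicationCapabilities, etc.).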
+ V2_0: true + +################################################################################ +# +# SECTION: Application +# +# - This section defines the values to encode into a config transaction or +# genesis block for application related parameters +# +################################################################################ +Application: &ApplicationDefaults + + # Organizations is the list of orgs which are defined as participants on + # the application side of the network + Organizations: + + # Policies defines the set of policies at this level of the config tree + # For Application policies, their canonical path is + # /Channel/Application/ + Policies: + Readers: + Type: ImplicitMeta + Rule: "ANY Readers" + Writers: + Type: ImplicitMeta + Rule: "ANY Writers" + Admins: + Type: ImplicitMeta + Rule: "MAJORITY Admins" + LifecycleEndorsement: + Type: Signature + Rule: "OR('Org1MSP.peer','Org2MSP.peer')" + Endorsement: + Type: Signature + Rule: "OR('Org1MSP.peer','Org2MSP.peer')" + + Capabilities: + <<: *ApplicationCapabilities +################################################################################ +# +# SECTION: Orderer +# +# - This section defines the values to encode into a config transaction or +# genesis block for orderer related parameters +# +################################################################################ +Orderer: &OrdererDefaults + + # Orderer Type: The orderer implementation to start + OrdererType: etcdraft + + EtcdRaft: + Consenters: + - Host: org0-orderersnode1.${KUBE_DNS_DOMAIN} + Port: 7050 + ClientTLSCert: ../../temp/channel-msp/ordererOrganizations/org0/orderers/org0-orderersnode1/tls/signcerts/tls-cert.pem + ServerTLSCert: ../../temp/channel-msp/ordererOrganizations/org0/orderers/org0-orderersnode1/tls/signcerts/tls-cert.pem + - Host: org0-orderersnode2.${KUBE_DNS_DOMAIN} + Port: 7050 + ClientTLSCert: ../../temp/channel-msp/ordererOrganizations/org0/orderers/org0-orderersnode2/tls/signcerts/tls-cert.pem + ServerTLSCert: ../../temp/channel-msp/ordererOrganizations/org0/orderers/org0-orderersnode2/tls/signcerts/tls-cert.pem + - Host: org0-orderersnode3.${KUBE_DNS_DOMAIN} + Port: 7050 + ClientTLSCert: ../../temp/channel-msp/ordererOrganizations/org0/orderers/org0-orderersnode3/tls/signcerts/tls-cert.pem + ServerTLSCert: ../../temp/channel-msp/ordererOrganizations/org0/orderers/org0-orderersnode3/tls/signcerts/tls-cert.pem + + + # Options to be specified for all the etcd/raft nodes. The values here + # are the defaults for all new channels and can be modified on a + # per-channel basis via configuration updates. + Options: + # TickInterval is the time interval between two Node.Tick invocations. + #TickInterval: 500ms default + TickInterval: 2500ms + + # ElectionTick is the number of Node.Tick invocations that must pass + # between elections. That is, if a follower does not receive any + # message from the leader of current term before ElectionTick has + # elapsed, it will become candidate and start an election. + # ElectionTick must be greater than HeartbeatTick. + # ElectionTick: 10 default + ElectionTick: 5 + + # HeartbeatTick is the number of Node.Tick invocations that must + # pass between heartbeats. That is, a leader sends heartbeat + # messages to maintain its leadership every HeartbeatTick ticks. + HeartbeatTick: 1 + + # MaxInflightBlocks limits the max number of in-flight append messages + # during optimistic replication phase. 
+ MaxInflightBlocks: 5 + + # SnapshotIntervalSize defines number of bytes per which a snapshot is taken + SnapshotIntervalSize: 16 MB + + # Batch Timeout: The amount of time to wait before creating a batch + BatchTimeout: 2s + + # Batch Size: Controls the number of messages batched into a block + BatchSize: + + # Max Message Count: The maximum number of messages to permit in a batch + MaxMessageCount: 10 + + # Absolute Max Bytes: The absolute maximum number of bytes allowed for + # the serialized messages in a batch. + AbsoluteMaxBytes: 99 MB + + # Preferred Max Bytes: The preferred maximum number of bytes allowed for + # the serialized messages in a batch. A message larger than the preferred + # max bytes will result in a batch larger than preferred max bytes. + PreferredMaxBytes: 512 KB + + # Organizations is the list of orgs which are defined as participants on + # the orderer side of the network + Organizations: + + # Policies defines the set of policies at this level of the config tree + # For Orderer policies, their canonical path is + # /Channel/Orderer/ + Policies: + Readers: + Type: ImplicitMeta + Rule: "ANY Readers" + Writers: + Type: ImplicitMeta + Rule: "ANY Writers" + Admins: + Type: ImplicitMeta + Rule: "MAJORITY Admins" + # BlockValidation specifies what signatures must be included in the block + # from the orderer for the peer to validate it. + BlockValidation: + Type: ImplicitMeta + Rule: "ANY Writers" + +################################################################################ +# +# CHANNEL +# +# This section defines the values to encode into a config transaction or +# genesis block for channel related parameters. +# +################################################################################ +Channel: &ChannelDefaults + # Policies defines the set of policies at this level of the config tree + # For Channel policies, their canonical path is + # /Channel/ + Policies: + # Who may invoke the 'Deliver' API + Readers: + Type: ImplicitMeta + Rule: "ANY Readers" + # Who may invoke the 'Broadcast' API + Writers: + Type: ImplicitMeta + Rule: "ANY Writers" + # By default, who may modify elements at this config level + Admins: + Type: ImplicitMeta + Rule: "MAJORITY Admins" + + # Capabilities describes the channel level capabilities, see the + # dedicated Capabilities section elsewhere in this file for a full + # description + Capabilities: + <<: *ChannelCapabilities + +################################################################################ +# +# Profile +# +# - Different configuration profiles may be encoded here to be specified +# as parameters to the configtxgen tool +# +################################################################################ +Profiles: + + # test network profile with application (not system) channel. + TwoOrgsApplicationGenesis: + <<: *ChannelDefaults + Orderer: + <<: *OrdererDefaults + Organizations: + - *OrdererOrg + Capabilities: *OrdererCapabilities + Application: + <<: *ApplicationDefaults + Organizations: + - *Org1 + - *Org2 + Capabilities: *ApplicationCapabilities + + + # + # Unclear lineage for these profiles: nano-fab? + # + # TwoOrgsOrdererGenesis will construct a system channel as it has a Consortiums stanza, which is not + # compatible with osnadmin. + # + # @enyeart - which profile should be used for the kube test network? 
+ # + TwoOrgsOrdererGenesis: + <<: *ChannelDefaults + Orderer: + <<: *OrdererDefaults + OrdererType: etcdraft + Organizations: + - *OrdererOrg + Capabilities: + <<: *OrdererCapabilities + Consortiums: + SampleConsortium: + Organizations: + - *Org1 + - *Org2 + TwoOrgsChannel: + Consortium: SampleConsortium + <<: *ChannelDefaults + Application: + <<: *ApplicationDefaults + Organizations: + - *Org1 + - *Org2 + Capabilities: + <<: *ApplicationCapabilities + Org1Channel: + Consortium: SampleConsortium + <<: *ChannelDefaults + Application: + <<: *ApplicationDefaults + Organizations: + - *Org1 + Capabilities: + <<: *ApplicationCapabilities + Org2Channel: + Consortium: SampleConsortium + <<: *ChannelDefaults + Application: + <<: *ApplicationDefaults + Organizations: + - *Org2 + Capabilities: + <<: *ApplicationCapabilities diff --git a/sample-network/config/console/hlf-operations-console.yaml b/sample-network/config/console/hlf-operations-console.yaml new file mode 100644 index 00000000..e43cb710 --- /dev/null +++ b/sample-network/config/console/hlf-operations-console.yaml @@ -0,0 +1,80 @@ +--- +apiVersion: ibp.com/v1beta1 +kind: IBPConsole +metadata: + name: hlf-console +spec: + arch: + - amd64 + license: + accept: true + serviceAccountName: default + email: "${CONSOLE_USERNAME}" + password: "${CONSOLE_PASSWORD}" + allowDefaultPassword: true + # registryURL: ghcr.io/ibm-blockchain + imagePullSecrets: + - ghcr-pull-secret + images: + consoleInitImage: ${INIT_IMAGE} + consoleInitTag: ${INIT_IMAGE_LABEL} + consoleImage: ${CONSOLE_IMAGE} + consoleTag: ${CONSOLE_IMAGE_LABEL} + deployerImage: ${DEPLOYER_IMAGE} + deployerTag: ${DEPLOYER_IMAGE_LABEL} + configtxlatorImage: ${TOOLS_IMAGE} + configtxlatorTag: ${TOOLS_IMAGE_LABEL} + couchdbImage: ${COUCHDB_IMAGE} + couchdbTag: ${COUCHDB_IMAGE_LABEL} + networkinfo: + domain: "${CONSOLE_DOMAIN}" + storage: + console: + class: "${STORAGE_CLASS}" + size: 5Gi + usetags: true + version: 1.0.0 + resources: + init: + limits: + cpu: 100m + memory: 200M + requests: + cpu: 10m + memory: 20M + configtxlator: + limits: + cpu: 25m + ephemeral-storage: 1G + memory: 50Mi + requests: + cpu: 25m + ephemeral-storage: 100M + memory: 50Mi + couchdb: + limits: + cpu: 500m + ephemeral-storage: 1Gi + memory: 1000Mi + requests: + cpu: 50m + ephemeral-storage: 100Mi + memory: 128Mi + deployer: + limits: + cpu: 100m + ephemeral-storage: 1G + memory: 200Mi + requests: + cpu: 100m + ephemeral-storage: 100M + memory: 128Mi + console: + limits: + cpu: 500m + ephemeral-storage: 1G + memory: 1000Mi + requests: + cpu: 50m + ephemeral-storage: 100M + memory: 128Mi \ No newline at end of file diff --git a/sample-network/config/console/kustomization.yaml b/sample-network/config/console/kustomization.yaml new file mode 100644 index 00000000..1f7ec5bd --- /dev/null +++ b/sample-network/config/console/kustomization.yaml @@ -0,0 +1,5 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - hlf-operations-console.yaml diff --git a/sample-network/config/core.yaml b/sample-network/config/core.yaml new file mode 100644 index 00000000..2097c563 --- /dev/null +++ b/sample-network/config/core.yaml @@ -0,0 +1,775 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +############################################################################### +# +# Peer section +# +############################################################################### +peer: + + # The peer id provides a name for this peer instance and is used when + # naming docker resources. + id: jdoe + + # The networkId allows for logical separation of networks and is used when + # naming docker resources. + networkId: dev + + # The Address at local network interface this Peer will listen on. + # By default, it will listen on all network interfaces + listenAddress: 0.0.0.0:7051 + + # The endpoint this peer uses to listen for inbound chaincode connections. + # If this is commented-out, the listen address is selected to be + # the peer's address (see below) with port 7052 + # chaincodeListenAddress: 0.0.0.0:7052 + + # The endpoint the chaincode for this peer uses to connect to the peer. + # If this is not specified, the chaincodeListenAddress address is selected. + # And if chaincodeListenAddress is not specified, address is selected from + # peer address (see below). If specified peer address is invalid then it + # will fallback to the auto detected IP (local IP) regardless of the peer + # addressAutoDetect value. + # chaincodeAddress: 0.0.0.0:7052 + + # When used as peer config, this represents the endpoint to other peers + # in the same organization. For peers in other organization, see + # gossip.externalEndpoint for more info. + # When used as CLI config, this means the peer's endpoint to interact with + address: 0.0.0.0:7051 + + # Whether the Peer should programmatically determine its address + # This case is useful for docker containers. + # When set to true, will override peer address. + addressAutoDetect: false + + # Keepalive settings for peer server and clients + keepalive: + # Interval is the duration after which if the server does not see + # any activity from the client it pings the client to see if it's alive + interval: 7200s + # Timeout is the duration the server waits for a response + # from the client after sending a ping before closing the connection + timeout: 20s + # MinInterval is the minimum permitted time between client pings. + # If clients send pings more frequently, the peer server will + # disconnect them + minInterval: 60s + # Client keepalive settings for communicating with other peer nodes + client: + # Interval is the time between pings to peer nodes. This must + # greater than or equal to the minInterval specified by peer + # nodes + interval: 60s + # Timeout is the duration the client waits for a response from + # peer nodes before closing the connection + timeout: 20s + # DeliveryClient keepalive settings for communication with ordering + # nodes. + deliveryClient: + # Interval is the time between pings to ordering nodes. This must + # greater than or equal to the minInterval specified by ordering + # nodes. 
+ interval: 60s + # Timeout is the duration the client waits for a response from + # ordering nodes before closing the connection + timeout: 20s + + + # Gossip related configuration + gossip: + # Bootstrap set to initialize gossip with. + # This is a list of other peers that this peer reaches out to at startup. + # Important: The endpoints here have to be endpoints of peers in the same + # organization, because the peer would refuse connecting to these endpoints + # unless they are in the same organization as the peer. + bootstrap: 127.0.0.1:7051 + + # NOTE: orgLeader and useLeaderElection parameters are mutual exclusive. + # Setting both to true would result in the termination of the peer + # since this is undefined state. If the peers are configured with + # useLeaderElection=false, make sure there is at least 1 peer in the + # organization that its orgLeader is set to true. + + # Defines whenever peer will initialize dynamic algorithm for + # "leader" selection, where leader is the peer to establish + # connection with ordering service and use delivery protocol + # to pull ledger blocks from ordering service. + useLeaderElection: false + # Statically defines peer to be an organization "leader", + # where this means that current peer will maintain connection + # with ordering service and disseminate block across peers in + # its own organization. Multiple peers or all peers in an organization + # may be configured as org leaders, so that they all pull + # blocks directly from ordering service. + orgLeader: true + + # Interval for membershipTracker polling + membershipTrackerInterval: 5s + + # Overrides the endpoint that the peer publishes to peers + # in its organization. For peers in foreign organizations + # see 'externalEndpoint' + endpoint: + # Maximum count of blocks stored in memory + maxBlockCountToStore: 10 + # Max time between consecutive message pushes(unit: millisecond) + maxPropagationBurstLatency: 10ms + # Max number of messages stored until a push is triggered to remote peers + maxPropagationBurstSize: 10 + # Number of times a message is pushed to remote peers + propagateIterations: 1 + # Number of peers selected to push messages to + propagatePeerNum: 3 + # Determines frequency of pull phases(unit: second) + # Must be greater than digestWaitTime + responseWaitTime + pullInterval: 4s + # Number of peers to pull from + pullPeerNum: 3 + # Determines frequency of pulling state info messages from peers(unit: second) + requestStateInfoInterval: 4s + # Determines frequency of pushing state info messages to peers(unit: second) + publishStateInfoInterval: 4s + # Maximum time a stateInfo message is kept until expired + stateInfoRetentionInterval: + # Time from startup certificates are included in Alive messages(unit: second) + publishCertPeriod: 10s + # Should we skip verifying block messages or not (currently not in use) + skipBlockVerification: false + # Dial timeout(unit: second) + dialTimeout: 3s + # Connection timeout(unit: second) + connTimeout: 2s + # Buffer size of received messages + recvBuffSize: 20 + # Buffer size of sending messages + sendBuffSize: 200 + # Time to wait before pull engine processes incoming digests (unit: second) + # Should be slightly smaller than requestWaitTime + digestWaitTime: 1s + # Time to wait before pull engine removes incoming nonce (unit: milliseconds) + # Should be slightly bigger than digestWaitTime + requestWaitTime: 1500ms + # Time to wait before pull engine ends pull (unit: second) + responseWaitTime: 2s + # Alive check interval(unit: 
second) + aliveTimeInterval: 5s + # Alive expiration timeout(unit: second) + aliveExpirationTimeout: 25s + # Reconnect interval(unit: second) + reconnectInterval: 25s + # Max number of attempts to connect to a peer + maxConnectionAttempts: 120 + # Message expiration factor for alive messages + msgExpirationFactor: 20 + # This is an endpoint that is published to peers outside of the organization. + # If this isn't set, the peer will not be known to other organizations. + externalEndpoint: + # Leader election service configuration + election: + # Longest time peer waits for stable membership during leader election startup (unit: second) + startupGracePeriod: 15s + # Interval gossip membership samples to check its stability (unit: second) + membershipSampleInterval: 1s + # Time passes since last declaration message before peer decides to perform leader election (unit: second) + leaderAliveThreshold: 10s + # Time between peer sends propose message and declares itself as a leader (sends declaration message) (unit: second) + leaderElectionDuration: 5s + + pvtData: + # pullRetryThreshold determines the maximum duration of time private data corresponding for a given block + # would be attempted to be pulled from peers until the block would be committed without the private data + pullRetryThreshold: 60s + # As private data enters the transient store, it is associated with the peer's ledger's height at that time. + # transientstoreMaxBlockRetention defines the maximum difference between the current ledger's height upon commit, + # and the private data residing inside the transient store that is guaranteed not to be purged. + # Private data is purged from the transient store when blocks with sequences that are multiples + # of transientstoreMaxBlockRetention are committed. + transientstoreMaxBlockRetention: 1000 + # pushAckTimeout is the maximum time to wait for an acknowledgement from each peer + # at private data push at endorsement time. + pushAckTimeout: 3s + # Block to live pulling margin, used as a buffer + # to prevent peer from trying to pull private data + # from peers that is soon to be purged in next N blocks. + # This helps a newly joined peer catch up to current + # blockchain height quicker. + btlPullMargin: 10 + # the process of reconciliation is done in an endless loop, while in each iteration reconciler tries to + # pull from the other peers the most recent missing blocks with a maximum batch size limitation. + # reconcileBatchSize determines the maximum batch size of missing private data that will be reconciled in a + # single iteration. + reconcileBatchSize: 10 + # reconcileSleepInterval determines the time reconciler sleeps from end of an iteration until the beginning + # of the next reconciliation iteration. + reconcileSleepInterval: 1m + # reconciliationEnabled is a flag that indicates whether private data reconciliation is enable or not. + reconciliationEnabled: true + # skipPullingInvalidTransactionsDuringCommit is a flag that indicates whether pulling of invalid + # transaction's private data from other peers need to be skipped during the commit time and pulled + # only through reconciler. + skipPullingInvalidTransactionsDuringCommit: false + # implicitCollectionDisseminationPolicy specifies the dissemination policy for the peer's own implicit collection. + # When a peer endorses a proposal that writes to its own implicit collection, below values override the default values + # for disseminating private data. + # Note that it is applicable to all channels the peer has joined. 
The implication is that requiredPeerCount has to + # be smaller than the number of peers in a channel that has the lowest numbers of peers from the organization. + implicitCollectionDisseminationPolicy: + # requiredPeerCount defines the minimum number of eligible peers to which the peer must successfully + # disseminate private data for its own implicit collection during endorsement. Default value is 0. + requiredPeerCount: 0 + # maxPeerCount defines the maximum number of eligible peers to which the peer will attempt to + # disseminate private data for its own implicit collection during endorsement. Default value is 1. + maxPeerCount: 1 + + # Gossip state transfer related configuration + state: + # indicates whenever state transfer is enabled or not + # default value is true, i.e. state transfer is active + # and takes care to sync up missing blocks allowing + # lagging peer to catch up to speed with rest network. + # Keep in mind that when peer.gossip.useLeaderElection is true + # and there are several peers in the organization, + # or peer.gossip.useLeaderElection is false alongside with + # peer.gossip.orgleader being false, the peer's ledger may lag behind + # the rest of the peers and will never catch up due to state transfer + # being disabled. + enabled: false + # checkInterval interval to check whether peer is lagging behind enough to + # request blocks via state transfer from another peer. + checkInterval: 10s + # responseTimeout amount of time to wait for state transfer response from + # other peers + responseTimeout: 3s + # batchSize the number of blocks to request via state transfer from another peer + batchSize: 10 + # blockBufferSize reflects the size of the re-ordering buffer + # which captures blocks and takes care to deliver them in order + # down to the ledger layer. The actual buffer size is bounded between + # 0 and 2*blockBufferSize, each channel maintains its own buffer + blockBufferSize: 20 + # maxRetries maximum number of re-tries to ask + # for single state transfer request + maxRetries: 3 + + # TLS Settings + tls: + # Require server-side TLS + enabled: true + # Require client certificates / mutual TLS for inbound connections. + # Note that clients that are not configured to use a certificate will + # fail to connect to the peer. + clientAuthRequired: false + # X.509 certificate used for TLS server + cert: + file: tls/server.crt + # Private key used for TLS server + key: + file: tls/server.key + # rootcert.file represents the trusted root certificate chain used for verifying certificates + # of other nodes during outbound connections. + # It is not required to be set, but can be used to augment the set of TLS CA certificates + # available from the MSPs of each channel’s configuration. + rootcert: + file: tls/ca.crt + # If mutual TLS is enabled, clientRootCAs.files contains a list of additional root certificates + # used for verifying certificates of client connections. + # It augments the set of TLS CA certificates available from the MSPs of each channel’s configuration. + # Minimally, set your organization's TLS CA root certificate so that the peer can receive join channel requests. + clientRootCAs: + files: + - tls/ca.crt + # Private key used for TLS when making client connections. + # If not set, peer.tls.key.file will be used instead + clientKey: + file: + # X.509 certificate used for TLS when making client connections. 
+ # If not set, peer.tls.cert.file will be used instead + clientCert: + file: + + # Authentication contains configuration parameters related to authenticating + # client messages + authentication: + # the acceptable difference between the current server time and the + # client's time as specified in a client request message + timewindow: 15m + + # Path on the file system where peer will store data (eg ledger). This + # location must be access control protected to prevent unintended + # modification that might corrupt the peer operations. + fileSystemPath: /var/hyperledger/production + + # BCCSP (Blockchain crypto provider): Select which crypto implementation or + # library to use + BCCSP: + Default: SW + # Settings for the SW crypto provider (i.e. when DEFAULT: SW) + SW: + # TODO: The default Hash and Security level needs refactoring to be + # fully configurable. Changing these defaults requires coordination + # SHA2 is hardcoded in several places, not only BCCSP + Hash: SHA2 + Security: 256 + # Location of Key Store + FileKeyStore: + # If "", defaults to 'mspConfigPath'/keystore + KeyStore: + # Settings for the PKCS#11 crypto provider (i.e. when DEFAULT: PKCS11) + PKCS11: + # Location of the PKCS11 module library + Library: + # Token Label + Label: + # User PIN + Pin: + Hash: + Security: + + # Path on the file system where peer will find MSP local configurations + mspConfigPath: msp + + # Identifier of the local MSP + # ----!!!!IMPORTANT!!!-!!!IMPORTANT!!!-!!!IMPORTANT!!!!---- + # Deployers need to change the value of the localMspId string. + # In particular, the name of the local MSP ID of a peer needs + # to match the name of one of the MSPs in each of the channel + # that this peer is a member of. Otherwise this peer's messages + # will not be identified as valid by other nodes. + localMspId: Org1MSP + + # CLI common client config options + client: + # connection timeout + connTimeout: 10s + + # Delivery service related config + deliveryclient: + # It sets the total time the delivery service may spend in reconnection + # attempts until its retry logic gives up and returns an error + reconnectTotalTimeThreshold: 3600s + + # It sets the delivery service <-> ordering service node connection timeout + connTimeout: 3s + + # It sets the delivery service maximal delay between consecutive retries + reConnectBackoffThreshold: 3600s + + # A list of orderer endpoint addresses which should be overridden + # when found in channel configurations. + addressOverrides: + # - from: + # to: + # caCertsFile: + # - from: + # to: + # caCertsFile: + + # Type for the local MSP - by default it's of type bccsp + localMspType: bccsp + + # Used with Go profiling tools only in none production environment. In + # production, it should be disabled (eg enabled: false) + profile: + enabled: false + listenAddress: 0.0.0.0:6060 + + # Handlers defines custom handlers that can filter and mutate + # objects passing within the peer, such as: + # Auth filter - reject or forward proposals from clients + # Decorators - append or mutate the chaincode input passed to the chaincode + # Endorsers - Custom signing over proposal response payload and its mutation + # Valid handler definition contains: + # - A name which is a factory method name defined in + # core/handlers/library/library.go for statically compiled handlers + # - library path to shared object binary for pluggable filters + # Auth filters and decorators are chained and executed in the order that + # they are defined. 
For example: + # authFilters: + # - + # name: FilterOne + # library: /opt/lib/filter.so + # - + # name: FilterTwo + # decorators: + # - + # name: DecoratorOne + # - + # name: DecoratorTwo + # library: /opt/lib/decorator.so + # Endorsers are configured as a map that its keys are the endorsement system chaincodes that are being overridden. + # Below is an example that overrides the default ESCC and uses an endorsement plugin that has the same functionality + # as the default ESCC. + # If the 'library' property is missing, the name is used as the constructor method in the builtin library similar + # to auth filters and decorators. + # endorsers: + # escc: + # name: DefaultESCC + # library: /etc/hyperledger/fabric/plugin/escc.so + handlers: + authFilters: + - + name: DefaultAuth + - + name: ExpirationCheck # This filter checks identity x509 certificate expiration + decorators: + - + name: DefaultDecorator + endorsers: + escc: + name: DefaultEndorsement + library: + validators: + vscc: + name: DefaultValidation + library: + + # library: /etc/hyperledger/fabric/plugin/escc.so + # Number of goroutines that will execute transaction validation in parallel. + # By default, the peer chooses the number of CPUs on the machine. Set this + # variable to override that choice. + # NOTE: overriding this value might negatively influence the performance of + # the peer so please change this value only if you know what you're doing + validatorPoolSize: + + # The discovery service is used by clients to query information about peers, + # such as - which peers have joined a certain channel, what is the latest + # channel config, and most importantly - given a chaincode and a channel, + # what possible sets of peers satisfy the endorsement policy. + discovery: + enabled: true + # Whether the authentication cache is enabled or not. + authCacheEnabled: true + # The maximum size of the cache, after which a purge takes place + authCacheMaxSize: 1000 + # The proportion (0 to 1) of entries that remain in the cache after the cache is purged due to overpopulation + authCachePurgeRetentionRatio: 0.75 + # Whether to allow non-admins to perform non channel scoped queries. + # When this is false, it means that only peer admins can perform non channel scoped queries. + orgMembersAllowedAccess: false + + # Limits is used to configure some internal resource limits. + limits: + # Concurrency limits the number of concurrently running requests to a service on each peer. + # Currently this option is only applied to endorser service and deliver service. + # When the property is missing or the value is 0, the concurrency limit is disabled for the service. + concurrency: + # endorserService limits concurrent requests to endorser service that handles chaincode deployment, query and invocation, + # including both user chaincodes and system chaincodes. + endorserService: 2500 + # deliverService limits concurrent event listeners registered to deliver service for blocks and transaction events. + deliverService: 2500 + +############################################################################### +# +# VM section +# +############################################################################### +vm: + + # Endpoint of the vm management system. For docker can be one of the following in general + # unix:///var/run/docker.sock + # http://localhost:2375 + # https://localhost:2376 + # endpoint: unix:///var/run/docker.sock + + # DISABLE the docker daemon endpoint to prevent /healthz from checking for docker in "External Builder" mode. 
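+  # Note (editor): with the endpoint left empty below, the peer has no Docker
+  # daemon available to build or run chaincode, so chaincode launches are
+  # expected to go through the external builder configured under
+  # chaincode.externalBuilders further down in this file ("ccs-builder",
+  # e.g. chaincode-as-a-service style deployments).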
+ endpoint: + + # settings for docker vms + docker: + tls: + enabled: false + ca: + file: docker/ca.crt + cert: + file: docker/tls.crt + key: + file: docker/tls.key + + # Enables/disables the standard out/err from chaincode containers for + # debugging purposes + attachStdout: false + + # Parameters on creating docker container. + # Container may be efficiently created using ipam & dns-server for cluster + # NetworkMode - sets the networking mode for the container. Supported + # standard values are: `host`(default),`bridge`,`ipvlan`,`none`. + # Dns - a list of DNS servers for the container to use. + # Note: `Privileged` `Binds` `Links` and `PortBindings` properties of + # Docker Host Config are not supported and will not be used if set. + # LogConfig - sets the logging driver (Type) and related options + # (Config) for Docker. For more info, + # https://docs.docker.com/engine/admin/logging/overview/ + # Note: Set LogConfig using Environment Variables is not supported. + hostConfig: + NetworkMode: host + Dns: + # - 192.168.0.1 + LogConfig: + Type: json-file + Config: + max-size: "50m" + max-file: "5" + Memory: 2147483648 + +############################################################################### +# +# Chaincode section +# +############################################################################### +chaincode: + + # The id is used by the Chaincode stub to register the executing Chaincode + # ID with the Peer and is generally supplied through ENV variables + # the `path` form of ID is provided when installing the chaincode. + # The `name` is used for all other requests and can be any string. + id: + path: + name: + + # Generic builder environment, suitable for most chaincode types + builder: $(DOCKER_NS)/fabric-ccenv:$(TWO_DIGIT_VERSION) + + # Enables/disables force pulling of the base docker images (listed below) + # during user chaincode instantiation. + # Useful when using moving image tags (such as :latest) + pull: false + + golang: + # golang will never need more than baseos + runtime: $(DOCKER_NS)/fabric-baseos:$(TWO_DIGIT_VERSION) + + # whether or not golang chaincode should be linked dynamically + dynamicLink: false + + java: + # This is an image based on java:openjdk-8 with addition compiler + # tools added for java shim layer packaging. + # This image is packed with shim layer libraries that are necessary + # for Java chaincode runtime. + runtime: $(DOCKER_NS)/fabric-javaenv:$(TWO_DIGIT_VERSION) + + node: + # This is an image based on node:$(NODE_VER)-alpine + runtime: $(DOCKER_NS)/fabric-nodeenv:$(TWO_DIGIT_VERSION) + + # List of directories to treat as external builders and launchers for + # chaincode. The external builder detection processing will iterate over the + # builders in the order specified below. + externalBuilders: + - path: /var/hyperledger/fabric/chaincode/ccs-builder + name: ccs-builder + propagateEnvironment: + - HOME + - CORE_PEER_ID + - CORE_PEER_LOCALMSPID + + # The maximum duration to wait for the chaincode build and install process + # to complete. + installTimeout: 300s + + # Timeout duration for starting up a container and waiting for Register + # to come through. + startuptimeout: 300s + + # Timeout duration for Invoke and Init calls to prevent runaway. + # This timeout is used by all chaincodes in all the channels, including + # system chaincodes. + # Note that during Invoke, if the image is not available (e.g. being + # cleaned up when in development environment), the peer will automatically + # build the image, which might take more time. 
In production environment, + # the chaincode image is unlikely to be deleted, so the timeout could be + # reduced accordingly. + executetimeout: 30s + + # There are 2 modes: "dev" and "net". + # In dev mode, user runs the chaincode after starting peer from + # command line on local machine. + # In net mode, peer will run chaincode in a docker container. + mode: net + + # keepalive in seconds. In situations where the communication goes through a + # proxy that does not support keep-alive, this parameter will maintain connection + # between peer and chaincode. + # A value <= 0 turns keepalive off + keepalive: 0 + + # enabled system chaincodes + system: + _lifecycle: enable + cscc: enable + lscc: enable + qscc: enable + + # Logging section for the chaincode container + logging: + # Default level for all loggers within the chaincode container + level: info + # Override default level for the 'shim' logger + shim: warning + # Format for the chaincode container logs + format: '%{color}%{time:2006-01-02 15:04:05.000 MST} [%{module}] %{shortfunc} -> %{level:.4s} %{id:03x}%{color:reset} %{message}' + +############################################################################### +# +# Ledger section - ledger configuration encompasses both the blockchain +# and the state +# +############################################################################### +ledger: + + blockchain: + + state: + # stateDatabase - options are "goleveldb", "CouchDB" + # goleveldb - default state database stored in goleveldb. + # CouchDB - store state database in CouchDB + stateDatabase: goleveldb + # Limit on the number of records to return per query + totalQueryLimit: 100000 + couchDBConfig: + # It is recommended to run CouchDB on the same server as the peer, and + # not map the CouchDB container port to a server port in docker-compose. + # Otherwise proper security must be provided on the connection between + # CouchDB client (on the peer) and server. + couchDBAddress: 127.0.0.1:5984 + # This username must have read and write authority on CouchDB + username: + # The password is recommended to pass as an environment variable + # during start up (eg CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD). + # If it is stored here, the file must be access control protected + # to prevent unintended users from discovering the password. + password: + # Number of retries for CouchDB errors + maxRetries: 3 + # Number of retries for CouchDB errors during peer startup. + # The delay between retries doubles for each attempt. + # Default of 10 retries results in 11 attempts over 2 minutes. + maxRetriesOnStartup: 10 + # CouchDB request timeout (unit: duration, e.g. 20s) + requestTimeout: 35s + # Limit on the number of records per each CouchDB query + # Note that chaincode queries are only bound by totalQueryLimit. + # Internally the chaincode may execute multiple CouchDB queries, + # each of size internalQueryLimit. + internalQueryLimit: 1000 + # Limit on the number of records per CouchDB bulk update batch + maxBatchUpdateSize: 1000 + # Warm indexes after every N blocks. + # This option warms any indexes that have been + # deployed to CouchDB after every N blocks. + # A value of 1 will warm indexes after every block commit, + # to ensure fast selector queries. + # Increasing the value may improve write efficiency of peer and CouchDB, + # but may degrade query response time. + warmIndexesAfterNBlocks: 1 + # Create the _global_changes system database + # This is optional. 
Creating the global changes database will require + # additional system resources to track changes and maintain the database + createGlobalChangesDB: false + # CacheSize denotes the maximum mega bytes (MB) to be allocated for the in-memory state + # cache. Note that CacheSize needs to be a multiple of 32 MB. If it is not a multiple + # of 32 MB, the peer would round the size to the next multiple of 32 MB. + # To disable the cache, 0 MB needs to be assigned to the cacheSize. + cacheSize: 64 + + history: + # enableHistoryDatabase - options are true or false + # Indicates if the history of key updates should be stored. + # All history 'index' will be stored in goleveldb, regardless if using + # CouchDB or alternate database for the state. + enableHistoryDatabase: true + + pvtdataStore: + # the maximum db batch size for converting + # the ineligible missing data entries to eligible missing data entries + collElgProcMaxDbBatchSize: 5000 + # the minimum duration (in milliseconds) between writing + # two consecutive db batches for converting the ineligible missing data entries to eligible missing data entries + collElgProcDbBatchesInterval: 1000 + # The missing data entries are classified into two categories: + # (1) prioritized + # (2) deprioritized + # Initially, all missing data are in the prioritized list. When the + # reconciler is unable to fetch the missing data from other peers, + # the unreconciled missing data would be moved to the deprioritized list. + # The reconciler would retry deprioritized missing data after every + # deprioritizedDataReconcilerInterval (unit: minutes). Note that the + # interval needs to be greater than the reconcileSleepInterval + deprioritizedDataReconcilerInterval: 60m + + snapshots: + # Path on the file system where peer will store ledger snapshots + rootDir: /var/hyperledger/production/snapshots + +############################################################################### +# +# Operations section +# +############################################################################### +operations: + # host and port for the operations server + listenAddress: 127.0.0.1:9443 + + # TLS configuration for the operations endpoint + tls: + # TLS enabled + enabled: false + + # path to PEM encoded server certificate for the operations server + cert: + file: + + # path to PEM encoded server key for the operations server + key: + file: + + # most operations service endpoints require client authentication when TLS + # is enabled. clientAuthRequired requires client certificate authentication + # at the TLS layer to access all resources. 
+ clientAuthRequired: false + + # paths to PEM encoded ca certificates to trust for client authentication + clientRootCAs: + files: [] + +############################################################################### +# +# Metrics section +# +############################################################################### +metrics: + # metrics provider is one of statsd, prometheus, or disabled + provider: disabled + + # statsd configuration + statsd: + # network type: tcp or udp + network: udp + + # statsd server address + address: 127.0.0.1:8125 + + # the interval at which locally cached counters and gauges are pushed + # to statsd; timings are pushed immediately + writeInterval: 10s + + # prefix is prepended to all emitted statsd metrics + prefix: diff --git a/sample-network/config/manager/hlf-operator-manager.yaml b/sample-network/config/manager/hlf-operator-manager.yaml new file mode 100644 index 00000000..41791782 --- /dev/null +++ b/sample-network/config/manager/hlf-operator-manager.yaml @@ -0,0 +1,123 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: fabric-operator + labels: + release: "operator" + helm.sh/chart: "hlf" + app.kubernetes.io/name: "hlf" + app.kubernetes.io/instance: "hlf" + app.kubernetes.io/managed-by: "fabric-operator" +spec: + replicas: 1 + strategy: + type: "Recreate" + selector: + matchLabels: + name: fabric-operator + template: + metadata: + labels: + name: fabric-operator + release: "operator" + helm.sh/chart: "hlf" + app.kubernetes.io/name: "hlf" + app.kubernetes.io/instance: "hlf" + app.kubernetes.io/managed-by: "fabric-operator" + annotations: + productName: "IBM Support for Hyperledger Fabric" + productID: "5d5997a033594f149a534a09802d60f1" + productVersion: "1.0.0" + productChargedContainers: "" + productMetric: "VIRTUAL_PROCESSOR_CORE" + spec: + # hostIPC: false + # hostNetwork: false + # hostPID: false + serviceAccountName: hlf-operator + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + # securityContext: + # runAsNonRoot: true + # runAsUser: 1001 + # fsGroup: 2000 + + imagePullSecrets: + - name: ghcr-pull-secret + + containers: + - name: fabric-operator + image: ${FABRIC_OPERATOR_IMAGE} + imagePullPolicy: ${IMAGE_PULL_POLICY} + + command: + - ibp-operator + # securityContext: + # privileged: false + # allowPrivilegeEscalation: false + # readOnlyRootFilesystem: false + # runAsNonRoot: false + # runAsUser: 1001 + # capabilities: + # drop: + # - ALL + # add: + # - CHOWN + # - FOWNER + livenessProbe: + tcpSocket: + port: 8383 + initialDelaySeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + readinessProbe: + tcpSocket: + port: 8383 + initialDelaySeconds: 10 + timeoutSeconds: 5 + periodSeconds: 5 + env: + - name: WATCH_NAMESPACE + valueFrom: + 
fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: OPERATOR_NAME + value: "fabric-operator" + - name: CLUSTERTYPE + value: K8S + resources: + requests: + cpu: 10m + memory: 10Mi + limits: + cpu: 100m + memory: 200Mi diff --git a/sample-network/config/manager/kustomization.yaml b/sample-network/config/manager/kustomization.yaml new file mode 100644 index 00000000..9bc57c5e --- /dev/null +++ b/sample-network/config/manager/kustomization.yaml @@ -0,0 +1,22 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - hlf-operator-manager.yaml diff --git a/sample-network/config/orderers/kustomization.yaml b/sample-network/config/orderers/kustomization.yaml new file mode 100644 index 00000000..91c9041d --- /dev/null +++ b/sample-network/config/orderers/kustomization.yaml @@ -0,0 +1,24 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - org0-orderers.yaml + diff --git a/sample-network/config/orderers/org0-orderers.yaml b/sample-network/config/orderers/org0-orderers.yaml new file mode 100644 index 00000000..e619be9a --- /dev/null +++ b/sample-network/config/orderers/org0-orderers.yaml @@ -0,0 +1,151 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +--- +apiVersion: ibp.com/v1beta1 +kind: IBPOrderer +metadata: + name: org0-orderers +spec: + version: "${FABRIC_VERSION}" + domain: "${DOMAIN}" + license: + accept: true + action: + enroll: {} + reenroll: {} + clusterSize: 3 + clusterconfigoverride: + - general: + keepalive: + serverMinInterval: 61s + - general: + keepalive: + serverMinInterval: 61s + - general: + keepalive: + serverMinInterval: 61s + clustersecret: + - enrollment: + component: + caname: ca + cahost: test-network-org0-ca-ca.${DOMAIN} + caport: "443" + catls: + cacert: "${ORG0_CA_CERT}" + enrollid: "orderer1" + enrollsecret: "orderer1pw" + tls: + caname: tlsca + cahost: test-network-org0-ca-ca.${DOMAIN} + caport: "443" + catls: + cacert: "${ORG0_CA_CERT}" + enrollid: "orderer1" + enrollsecret: "orderer1pw" + csr: + hosts: + - "org0-orderersnode1" + - "org0-orderersnode1.${KUBE_DNS_DOMAIN}" + - enrollment: + component: + caname: ca + cahost: test-network-org0-ca-ca.${DOMAIN} + caport: "443" + catls: + cacert: "${ORG0_CA_CERT}" + enrollid: "orderer2" + enrollsecret: "orderer2pw" + tls: + caname: tlsca + cahost: test-network-org0-ca-ca.${DOMAIN} + caport: "443" + catls: + cacert: "${ORG0_CA_CERT}" + enrollid: "orderer2" + enrollsecret: "orderer2pw" + csr: + hosts: + - "org0-orderersnode2" + - "org0-orderersnode2.${KUBE_DNS_DOMAIN}" + - enrollment: + component: + caname: ca + cahost: test-network-org0-ca-ca.${DOMAIN} + caport: "443" + catls: + cacert: "${ORG0_CA_CERT}" + enrollid: "orderer3" + enrollsecret: "orderer3pw" + tls: + caname: tlsca + cahost: test-network-org0-ca-ca.${DOMAIN} + caport: "443" + catls: + cacert: "${ORG0_CA_CERT}" + enrollid: "orderer3" + enrollsecret: "orderer3pw" + csr: + hosts: + - "org0-orderersnode3" + - "org0-orderersnode3.${KUBE_DNS_DOMAIN}" + + customNames: + pvc: {} + images: + ordererInitImage: ${INIT_IMAGE} + ordererInitTag: ${INIT_IMAGE_LABEL} + ordererImage: ${ORDERER_IMAGE} + ordererTag: ${ORDERER_IMAGE_LABEL} + grpcwebImage: ${GRPCWEB_IMAGE} + grpcwebTag: ${GRPCWEB_IMAGE_LABEL} + ingress: + class: "" + tlsSecretName: "" + mspID: OrdererMSP + ordererType: etcdraft + orgName: OrdererOrg + useChannelLess: true + systemChannelName: testchainid + resources: + init: + limits: + cpu: 100m + memory: 200M + requests: + cpu: 10m + memory: 10M + orderer: + limits: + cpu: 600m + memory: 1200M + requests: + cpu: 10m + memory: 10M + proxy: + limits: + cpu: 100m + memory: 200M + requests: + cpu: 10m + memory: 10M + service: + type: ClusterIP + storage: + orderer: + class: "${STORAGE_CLASS}" + size: 5G diff --git a/sample-network/config/peers/kustomization.yaml b/sample-network/config/peers/kustomization.yaml new file mode 100644 index 00000000..a962324c --- /dev/null +++ b/sample-network/config/peers/kustomization.yaml @@ -0,0 +1,27 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - org1-peer1.yaml + - org1-peer2.yaml + - org2-peer1.yaml + - org2-peer2.yaml + diff --git a/sample-network/config/peers/org1-peer1.yaml b/sample-network/config/peers/org1-peer1.yaml new file mode 100644 index 00000000..ee44a59f --- /dev/null +++ b/sample-network/config/peers/org1-peer1.yaml @@ -0,0 +1,102 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +apiVersion: ibp.com/v1beta1 +kind: IBPPeer +metadata: + name: org1-peer1 +spec: + version: "${FABRIC_VERSION}" + domain: "${DOMAIN}" + peerExternalEndpoint: "test-network-org1-peer1-peer.${DOMAIN}:443" + license: + accept: true + action: + enroll: {} + reenroll: {} + configoverride: + peer: + keepalive: + minInterval: 61s + customNames: + pvc: {} + images: + peerInitImage: ${INIT_IMAGE} + peerInitTag: ${INIT_IMAGE_LABEL} + peerImage: ${PEER_IMAGE} + peerTag: ${PEER_IMAGE_LABEL} + grpcwebImage: ${GRPCWEB_IMAGE} + grpcwebTag: ${GRPCWEB_IMAGE_LABEL} + mspID: Org1MSP + mspSecret: org1-peer1-secret + secret: + enrollment: + component: + caname: ca + cahost: "test-network-org1-ca-ca.${DOMAIN}" + caport: "443" + catls: + cacert: "${ORG1_CA_CERT}" + enrollid: "peer1" + enrollsecret: "peer1pw" + tls: + caname: tlsca + cahost: "test-network-org1-ca-ca.${DOMAIN}" + caport: "443" + catls: + cacert: "${ORG1_CA_CERT}" + enrollid: "peer1" + enrollsecret: "peer1pw" + csr: + hosts: + - "org1-peer1" + - "org1-peer1.${KUBE_DNS_DOMAIN}" + chaincodeBuilderConfig: + peername: org1-peer1 + service: + type: ClusterIP + stateDb: leveldb + storage: + peer: + class: "${STORAGE_CLASS}" + size: 5G + statedb: + class: "${STORAGE_CLASS}" + size: 10Gi + resources: + init: + limits: + cpu: 100m + memory: 200M + requests: + cpu: 10m + memory: 10M + peer: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 10m + memory: 10M + proxy: + limits: + cpu: 100m + memory: 200M + requests: + cpu: 10m + memory: 10M \ No newline at end of file diff --git a/sample-network/config/peers/org1-peer2.yaml b/sample-network/config/peers/org1-peer2.yaml new file mode 100644 index 00000000..ac544a9e --- /dev/null +++ b/sample-network/config/peers/org1-peer2.yaml @@ -0,0 +1,102 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
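+#
+# This manifest declares the IBPPeer "org1-peer1".  The "enrollment.component"
+# section enrolls the peer's ecert (MSP) identity and "enrollment.tls" its TLS
+# identity, both against the org1 CA using enroll ID "peer1" (assumed to be
+# registered with that CA elsewhere in the sample-network configuration).
+# "chaincodeBuilderConfig.peername" feeds the {{.peername}} address template
+# used by the sample chaincode-as-a-service connection.json, so it should
+# match the name used when the chaincode service is launched.  As with the
+# other manifests, ${...} placeholders are resolved with envsubst at apply
+# time.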
+# +--- +apiVersion: ibp.com/v1beta1 +kind: IBPPeer +metadata: + name: org1-peer2 +spec: + version: "${FABRIC_VERSION}" + domain: "${DOMAIN}" + peerExternalEndpoint: "test-network-org1-peer2-peer.${DOMAIN}:443" + license: + accept: true + action: + enroll: {} + reenroll: {} + configoverride: + peer: + keepalive: + minInterval: 61s + customNames: + pvc: {} + images: + peerInitImage: ${INIT_IMAGE} + peerInitTag: ${INIT_IMAGE_LABEL} + peerImage: ${PEER_IMAGE} + peerTag: ${PEER_IMAGE_LABEL} + grpcwebImage: ${GRPCWEB_IMAGE} + grpcwebTag: ${GRPCWEB_IMAGE_LABEL} + mspID: Org1MSP + mspSecret: org1-peer2-secret + secret: + enrollment: + component: + caname: ca + cahost: "test-network-org1-ca-ca.${DOMAIN}" + caport: "443" + catls: + cacert: "${ORG1_CA_CERT}" + enrollid: "peer2" + enrollsecret: "peer2pw" + tls: + caname: tlsca + cahost: "test-network-org1-ca-ca.${DOMAIN}" + caport: "443" + catls: + cacert: "${ORG1_CA_CERT}" + enrollid: "peer2" + enrollsecret: "peer2pw" + csr: + hosts: + - "org1-peer2" + - "org1-peer2.${KUBE_DNS_DOMAIN}" + chaincodeBuilderConfig: + peername: org1-peer2 + service: + type: ClusterIP + stateDb: leveldb + storage: + peer: + class: "${STORAGE_CLASS}" + size: 5G + statedb: + class: "${STORAGE_CLASS}" + size: 10Gi + resources: + init: + limits: + cpu: 100m + memory: 200M + requests: + cpu: 10m + memory: 10M + peer: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 10m + memory: 10M + proxy: + limits: + cpu: 100m + memory: 200M + requests: + cpu: 10m + memory: 10M \ No newline at end of file diff --git a/sample-network/config/peers/org2-peer1.yaml b/sample-network/config/peers/org2-peer1.yaml new file mode 100644 index 00000000..0feb4cf8 --- /dev/null +++ b/sample-network/config/peers/org2-peer1.yaml @@ -0,0 +1,102 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +--- +apiVersion: ibp.com/v1beta1 +kind: IBPPeer +metadata: + name: org2-peer1 +spec: + version: "${FABRIC_VERSION}" + domain: "${DOMAIN}" + peerExternalEndpoint: "test-network-org2-peer1-peer.${DOMAIN}:443" + license: + accept: true + action: + enroll: {} + reenroll: {} + configoverride: + peer: + keepalive: + minInterval: 61s + customNames: + pvc: {} + images: + peerInitImage: ${INIT_IMAGE} + peerInitTag: ${INIT_IMAGE_LABEL} + peerImage: ${PEER_IMAGE} + peerTag: ${PEER_IMAGE_LABEL} + grpcwebImage: ${GRPCWEB_IMAGE} + grpcwebTag: ${GRPCWEB_IMAGE_LABEL} + mspID: Org2MSP + mspSecret: org2-peer1-secret + secret: + enrollment: + component: + caname: ca + cahost: "test-network-org2-ca-ca.${DOMAIN}" + caport: "443" + catls: + cacert: "${ORG2_CA_CERT}" + enrollid: "peer1" + enrollsecret: "peer1pw" + tls: + caname: tlsca + cahost: "test-network-org2-ca-ca.${DOMAIN}" + caport: "443" + catls: + cacert: "${ORG2_CA_CERT}" + enrollid: "peer1" + enrollsecret: "peer1pw" + csr: + hosts: + - "org2-peer1" + - "org2-peer1.${KUBE_DNS_DOMAIN}" + chaincodeBuilderConfig: + peername: org2-peer1 + service: + type: ClusterIP + stateDb: leveldb + storage: + peer: + class: "${STORAGE_CLASS}" + size: 5G + statedb: + class: "${STORAGE_CLASS}" + size: 10Gi + resources: + init: + limits: + cpu: 100m + memory: 200M + requests: + cpu: 10m + memory: 10M + peer: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 10m + memory: 10M + proxy: + limits: + cpu: 100m + memory: 200M + requests: + cpu: 10m + memory: 10M \ No newline at end of file diff --git a/sample-network/config/peers/org2-peer2.yaml b/sample-network/config/peers/org2-peer2.yaml new file mode 100644 index 00000000..e9677d47 --- /dev/null +++ b/sample-network/config/peers/org2-peer2.yaml @@ -0,0 +1,102 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +--- +apiVersion: ibp.com/v1beta1 +kind: IBPPeer +metadata: + name: org2-peer2 +spec: + version: "${FABRIC_VERSION}" + domain: "${DOMAIN}" + peerExternalEndpoint: "test-network-org2-peer2-peer.${DOMAIN}:443" + license: + accept: true + action: + enroll: {} + reenroll: {} + configoverride: + peer: + keepalive: + minInterval: 61s + customNames: + pvc: {} + images: + peerInitImage: ${INIT_IMAGE} + peerInitTag: ${INIT_IMAGE_LABEL} + peerImage: ${PEER_IMAGE} + peerTag: ${PEER_IMAGE_LABEL} + grpcwebImage: ${GRPCWEB_IMAGE} + grpcwebTag: ${GRPCWEB_IMAGE_LABEL} + mspID: Org2MSP + mspSecret: org2-peer2-secret + secret: + enrollment: + component: + caname: ca + cahost: "test-network-org2-ca-ca.${DOMAIN}" + caport: "443" + catls: + cacert: "${ORG2_CA_CERT}" + enrollid: "peer2" + enrollsecret: "peer2pw" + tls: + caname: tlsca + cahost: "test-network-org2-ca-ca.${DOMAIN}" + caport: "443" + catls: + cacert: "${ORG2_CA_CERT}" + enrollid: "peer2" + enrollsecret: "peer2pw" + csr: + hosts: + - "org2-peer2" + - "org2-peer2.${KUBE_DNS_DOMAIN}" + chaincodeBuilderConfig: + peername: org2-peer2 + service: + type: ClusterIP + stateDb: leveldb + storage: + peer: + class: "${STORAGE_CLASS}" + size: 5G + statedb: + class: "${STORAGE_CLASS}" + size: 10Gi + resources: + init: + limits: + cpu: 100m + memory: 200M + requests: + cpu: 10m + memory: 10M + peer: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 10m + memory: 10M + proxy: + limits: + cpu: 100m + memory: 200M + requests: + cpu: 10m + memory: 10M \ No newline at end of file diff --git a/sample-network/config/rbac/hlf-operator-clusterrole.yaml b/sample-network/config/rbac/hlf-operator-clusterrole.yaml new file mode 100644 index 00000000..fb35269b --- /dev/null +++ b/sample-network/config/rbac/hlf-operator-clusterrole.yaml @@ -0,0 +1,205 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: hlf-operator-role + labels: + release: "operator" + helm.sh/chart: "ibm-hlfsupport" + app.kubernetes.io/name: "ibm-hlfsupport" + app.kubernetes.io/instance: "ibm-hlfsupport" + app.kubernetes.io/managed-by: "ibm-hlfsupport-operator" +rules: + - apiGroups: + - extensions + resourceNames: + - ibm-hlfsupport-psp + resources: + - podsecuritypolicies + verbs: + - use + - apiGroups: + - apiextensions.k8s.io + resources: + - persistentvolumeclaims + - persistentvolumes + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - apiGroups: + - route.openshift.io + resources: + - routes + - routes/custom-host + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection + - apiGroups: + - "" + resources: + - pods + - pods/log + - persistentvolumeclaims + - persistentvolumes + - services + - endpoints + - events + - configmaps + - secrets + - nodes + - serviceaccounts + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection + - apiGroups: + - "batch" + resources: + - jobs + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection + - apiGroups: + - "authorization.openshift.io" + - "rbac.authorization.k8s.io" + resources: + - roles + - rolebindings + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection + - bind + - escalate + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - apiGroups: + - apps + resources: + - deployments + - daemonsets + - replicasets + - statefulsets + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection + - apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - get + - create + - apiGroups: + - apps + resourceNames: + - ibm-hlfsupport-operator + resources: + - deployments/finalizers + verbs: + - update + - apiGroups: + - ibp.com + resources: + - ibpcas.ibp.com + - ibppeers.ibp.com + - ibporderers.ibp.com + - ibpconsoles.ibp.com + - ibpcas + - ibppeers + - ibporderers + - ibpconsoles + - ibpcas/finalizers + - ibppeers/finalizers + - ibporderers/finalizers + - ibpconsoles/finalizers + - ibpcas/status + - ibppeers/status + - ibporderers/status + - ibpconsoles/status + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection + - apiGroups: + - extensions + - networking.k8s.io + - config.openshift.io + resources: + - ingresses + - networkpolicies + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection diff --git a/sample-network/config/rbac/hlf-operator-clusterrolebinding.yaml b/sample-network/config/rbac/hlf-operator-clusterrolebinding.yaml new file mode 100644 index 00000000..195c06d7 --- /dev/null +++ b/sample-network/config/rbac/hlf-operator-clusterrolebinding.yaml @@ -0,0 +1,36 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hlf-operator-rolebinding + labels: + release: "operator" + helm.sh/chart: "ibm-hlfsupport" + app.kubernetes.io/name: "ibm-hlfsupport" + app.kubernetes.io/instance: "ibm-hlfsupport" + app.kubernetes.io/managed-by: "ibm-hlfsupport-operator" +subjects: + - kind: ServiceAccount + name: hlf-operator + namespace: "${NS}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: hlf-operator-role diff --git a/sample-network/config/rbac/hlf-operator-rolebinding.yaml b/sample-network/config/rbac/hlf-operator-rolebinding.yaml new file mode 100644 index 00000000..04f948e8 --- /dev/null +++ b/sample-network/config/rbac/hlf-operator-rolebinding.yaml @@ -0,0 +1,19 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +kubectl -n $NS create rolebinding hlf-operator-rolebinding --clusterrole=hlf-operator-role --group=system:serviceaccounts:test-network -- diff --git a/sample-network/config/rbac/hlf-operator-serviceaccount.yaml b/sample-network/config/rbac/hlf-operator-serviceaccount.yaml new file mode 100644 index 00000000..7cb3420d --- /dev/null +++ b/sample-network/config/rbac/hlf-operator-serviceaccount.yaml @@ -0,0 +1,22 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: hlf-operator diff --git a/sample-network/config/rbac/hlf-psp.yaml b/sample-network/config/rbac/hlf-psp.yaml new file mode 100644 index 00000000..dcd53c72 --- /dev/null +++ b/sample-network/config/rbac/hlf-psp.yaml @@ -0,0 +1,48 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: ibm-hlfsupport-psp +spec: + hostIPC: false + hostNetwork: false + hostPID: false + privileged: true + allowPrivilegeEscalation: true + readOnlyRootFilesystem: false + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + requiredDropCapabilities: + - ALL + allowedCapabilities: + - NET_BIND_SERVICE + - CHOWN + - DAC_OVERRIDE + - SETGID + - SETUID + - FOWNER + volumes: + - '*' diff --git a/sample-network/config/rbac/kustomization.yaml b/sample-network/config/rbac/kustomization.yaml new file mode 100644 index 00000000..9a5132c7 --- /dev/null +++ b/sample-network/config/rbac/kustomization.yaml @@ -0,0 +1,26 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - hlf-psp.yaml + - hlf-operator-serviceaccount.yaml + - hlf-operator-clusterrole.yaml + - hlf-operator-clusterrolebinding.yaml + # - hlf-operator-rolebinding.yaml diff --git a/sample-network/network b/sample-network/network new file mode 100755 index 00000000..f589142f --- /dev/null +++ b/sample-network/network @@ -0,0 +1,194 @@ +#!/bin/bash +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +set -o errexit + +cd "$(dirname "$0")" + +# Set an environment variable based on an optional override (TEST_NETWORK_${name}) +# from the calling shell. If the override is not available, assign the parameter +# to a default value. 
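+#
+# For example (illustrative): running
+#
+#   TEST_NETWORK_CLUSTER_RUNTIME=k3s ./network up
+#
+# makes "context CLUSTER_RUNTIME kind" below resolve CLUSTER_RUNTIME to "k3s",
+# while an unset override leaves the default "kind" in place.  The same
+# pattern applies to every "context" call in this script.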
+function context() { + local name=$1 + local default_value=$2 + local override_name=TEST_NETWORK_${name} + + export ${name}="${!override_name:-${default_value}}" +} + +context FABRIC_VERSION 2.4.3 +context FABRIC_CA_VERSION 1.5.3 + +context CLUSTER_RUNTIME kind # or k3s for Rancher +context CONTAINER_CLI docker # or nerdctl for containerd +context CONTAINER_NAMESPACE "" # or "--namespace k8s.io" for containerd / nerdctl +context STORAGE_CLASS standard +context KUSTOMIZE_BUILD "kubectl kustomize" +context STAGE_DOCKER_IMAGES true +context FABRIC_CONTAINER_REGISTRY hyperledger + +context NAME test-network +context NS $NAME +context CLUSTER_NAME $CLUSTER_RUNTIME +context DOMAIN local.fabric.network +context KUBE_DNS_DOMAIN ${NS}.svc.cluster.local +context LOG_FILE network.log +context DEBUG_FILE network-debug.log +context LOG_ERROR_LINES 1 +context LOCAL_REGISTRY_NAME kind-registry +context LOCAL_REGISTRY_PORT 5000 +context NGINX_HTTP_PORT 80 +context NGINX_HTTPS_PORT 443 + +context CONSOLE_DOMAIN $DOMAIN +context CONSOLE_USERNAME admin +context CONSOLE_PASSWORD password + +# TODO: use new cc logic from test-network +context CHANNEL_NAME mychannel +context CHAINCODE_NAME asset-transfer-basic +context CHAINCODE_IMAGE ghcr.io/hyperledgendary/fabric-ccaas-asset-transfer-basic:latest +context CHAINCODE_LABEL basic_1.0 + +context CA_IMAGE ${FABRIC_CONTAINER_REGISTRY}/fabric-ca +context CA_IMAGE_LABEL ${FABRIC_CA_VERSION} +context PEER_IMAGE ${FABRIC_CONTAINER_REGISTRY}/fabric-peer +context PEER_IMAGE_LABEL ${FABRIC_VERSION} +context ORDERER_IMAGE ${FABRIC_CONTAINER_REGISTRY}/fabric-orderer +context ORDERER_IMAGE_LABEL ${FABRIC_VERSION} +context TOOLS_IMAGE ${FABRIC_CONTAINER_REGISTRY}/fabric-tools +context TOOLS_IMAGE ${FABRIC_VERSION} +context OPERATOR_IMAGE ghcr.io/ibm-blockchain/fabric-operator +context OPERATOR_IMAGE_LABEL latest-amd64 +context INIT_IMAGE registry.access.redhat.com/ubi8/ubi-minimal +context INIT_IMAGE_LABEL latest +context GRPCWEB_IMAGE ghcr.io/hyperledger-labs/grpc-web +context GRPCWEB_IMAGE_LABEL latest +context COUCHDB_IMAGE couchdb +context COUCHDB_IMAGE_LABEL 3.2.1 +context CONSOLE_IMAGE ghcr.io/hyperledger-labs/fabric-console +context CONSOLE_IMAGE_LABEL latest +context DEPLOYER_IMAGE ghcr.io/ibm-blockchain/fabric-deployer +context DEPLOYER_IMAGE_LABEL latest-amd64 + +export FABRIC_OPERATOR_IMAGE=${OPERATOR_IMAGE}:${OPERATOR_IMAGE_LABEL} +export FABRIC_CONSOLE_IMAGE=${CONSOLE_IMAGE}:${CONSOLE_IMAGE_LABEL} +export FABRIC_DEPLOYER_IMAGE=${DEPLOYER_IMAGE}:${DEPLOYER_IMAGE_LABEL} +export FABRIC_CA_IMAGE=${CA_IMAGE}:${CA_IMAGE_LABEL} +export FABRIC_PEER_IMAGE=${PEER_IMAGE}:${PEER_IMAGE_LABEL} +export FABRIC_ORDERER_IMAGE=${ORDERER_IMAGE}:${ORDERER_IMAGE_LABEL} +export FABRIC_TOOLS_IMAGE=${TOOLS_IMAGE}:${TOOLS_IMAGE_LABEL} + +export TEMP_DIR=${PWD}/temp + + +function print_help() { + log + log "--- Fabric Information" + log "Fabric Version \t\t: ${FABRIC_VERSION}" + log "Fabric CA Version \t: ${FABRIC_CA_VERSION}" + log "Container Registry \t: ${FABRIC_CONTAINER_REGISTRY}" + log "Network name \t\t: ${NAME}" + log "Channel name \t\t: ${CHANNEL_NAME}" + log + log "--- Chaincode Information" + log "Chaincode name \t\t: ${CHAINCODE_NAME}" + log "Chaincode image \t: ${CHAINCODE_IMAGE}" + log "Chaincode label \t: ${CHAINCODE_LABEL}" + log + log "--- Cluster Information" + log "Cluster runtime \t: ${CLUSTER_RUNTIME}" + log "Cluster name \t\t: ${CLUSTER_NAME}" + log "Cluster namespace \t: ${NS}" + log "Fabric Registry \t: ${FABRIC_CONTAINER_REGISTRY}" + log "Local Registry \t\t: 
${LOCAL_REGISTRY_NAME}" + log "Local Registry port \t: ${LOCAL_REGISTRY_PORT}" + log "nginx http port \t: ${NGINX_HTTP_PORT}" + log "nginx https port \t: ${NGINX_HTTPS_PORT}" + log + log "--- Script Information" + log "Log file \t\t: ${LOG_FILE}" + log "Debug log file \t\t: ${DEBUG_FILE}" + log + + echo todo: help output, parse mode, flags, env, etc. +} + +. scripts/utils.sh +. scripts/prereqs.sh +. scripts/kind.sh +. scripts/cluster.sh +. scripts/console.sh +. scripts/test_network.sh +. scripts/channel.sh +. scripts/chaincode.sh + +# check for kind, kubectl, etc. +check_prereqs + +# Initialize the logging system - control output to 'network.log' and everything else to 'network-debug.log' +logging_init + +## Parse mode +if [[ $# -lt 1 ]] ; then + print_help + exit 0 +else + MODE=$1 + shift +fi + +if [ "${MODE}" == "kind" ]; then + log "Initializing kind cluster \"${CLUSTER_NAME}\":" + kind_init + log "🏁 - Cluster is ready." + +elif [ "${MODE}" == "unkind" ]; then + log "Deleting kind cluster \"${CLUSTER_NAME}\":" + kind_delete + log "🏁 - Cluster is gone." + +elif [[ "${MODE}" == "cluster" || "${MODE}" == "k8s" || "${MODE}" == "kube" ]]; then + cluster_command_group $@ + +elif [ "${MODE}" == "channel" ]; then + channel_command_group $@ + +elif [[ "${MODE}" == "chaincode" || "${MODE}" == "cc" ]]; then + chaincode_command_group $@ + +elif [ "${MODE}" == "up" ]; then + log "Launching network \"${NAME}\":" + network_up + log "🏁 - Network is ready." + +elif [ "${MODE}" == "down" ]; then + log "Shutting down test network \"${NAME}\":" + network_down + log "🏁 - Fabric network is down." + +elif [ "${MODE}" == "console" ]; then + log "Launching Fabric Operations Console" + console_up + log "🏁 - Console is ready" + +else + print_help + exit 1 +fi + diff --git a/sample-network/scripts/chaincode.sh b/sample-network/scripts/chaincode.sh new file mode 100755 index 00000000..994955e6 --- /dev/null +++ b/sample-network/scripts/chaincode.sh @@ -0,0 +1,383 @@ +#!/bin/bash +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Convenience routine to "do everything" required to bring up a sample CC. +function deploy_chaincode() { + local cc_name=$1 + local cc_label=$2 + local cc_folder=$(absolute_path $3) + + local temp_folder=$(mktemp -d) + local cc_package=${temp_folder}/${cc_name}.tgz + + package_chaincode ${cc_label} ${cc_name} ${cc_package} + + set_chaincode_id ${cc_package} + set_chaincode_image ${cc_folder} + + build_chaincode_image ${cc_folder} ${CHAINCODE_IMAGE} + + if [ "${CLUSTER_RUNTIME}" == "kind" ]; then + kind_load_image ${CHAINCODE_IMAGE} + fi + + launch_chaincode ${cc_name} ${CHAINCODE_ID} ${CHAINCODE_IMAGE} + activate_chaincode ${cc_name} ${cc_package} +} + +# Infer a reasonable name for the chaincode image based on the folder path conventions, or +# allow the user to override with TEST_NETWORK_CHAINCODE_IMAGE. 
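+# For example (illustrative path), a chaincode folder such as
+#   /home/dev/fabric-samples/asset-transfer-basic/chaincode-external
+# would yield the image name
+#   fabric-samples/asset-transfer-basic/chaincode-external
+# unless TEST_NETWORK_CHAINCODE_IMAGE is set, in which case that value is used
+# verbatim.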
+function set_chaincode_image() { + local cc_folder=$1 + + if [ -z "$TEST_NETWORK_CHAINCODE_IMAGE" ]; then + # cc_folder path starting with first index of "fabric-samples" + CHAINCODE_IMAGE=${cc_folder/*fabric-samples/fabric-samples} + else + CHAINCODE_IMAGE=${TEST_NETWORK_CHAINCODE_IMAGE} + fi +} + +# Convenience routine to "do everything other than package and launch" a sample CC. +# When debugging a chaincode server, the process must be launched prior to completing +# the chaincode lifecycle at the peer. This routine provides a route for packaging +# and installing the chaincode out of band, and a single target to complete the peer +# chaincode lifecycle. +function activate_chaincode() { + local cc_name=$1 + local cc_package=$2 + + set_chaincode_id ${cc_package} + + install_chaincode ${cc_package} + approve_chaincode ${cc_name} ${CHAINCODE_ID} + commit_chaincode ${cc_name} +} + +function query_chaincode() { + local cc_name=$1 + shift + + set -x + + export_peer_context 1 1 + + peer chaincode query \ + -n $cc_name \ + -C $CHANNEL_NAME \ + -c $@ +} + +function query_chaincode_metadata() { + local cc_name=$1 + shift + + set -x + local args='{"Args":["org.hyperledger.fabric:GetMetadata"]}' + + log '' + log 'Org1-Peer1:' + export_peer_context 1 1 + peer chaincode query -n $cc_name -C $CHANNEL_NAME -c $args +# +# log '' +# log 'Org1-Peer2:' +# export_peer_context 1 2 +# peer chaincode query -n $cc_name -C $CHANNEL_NAME -c $args +} + +function invoke_chaincode() { + local cc_name=$1 + shift + + export_peer_context 1 1 + + peer chaincode invoke \ + -n $cc_name \ + -C $CHANNEL_NAME \ + -c $@ \ + --orderer ${NS}-org0-orderersnode1-orderer.${DOMAIN}:443 \ + --tls --cafile ${TEMP_DIR}/channel-msp/ordererOrganizations/org0/orderers/org0-orderersnode1/tls/signcerts/tls-cert.pem + + sleep 2 +} + +function build_chaincode_image() { + local cc_folder=$1 + local cc_image=$2 + + push_fn "Building chaincode image ${cc_image}" + + $CONTAINER_CLI build ${CONTAINER_NAMESPACE} -t ${cc_image} ${cc_folder} + + pop_fn +} + +function kind_load_image() { + local cc_image=$1 + + push_fn "Loading chaincode to kind image plane" + + kind load docker-image ${cc_image} + + pop_fn +} + +function package_chaincode() { + local cc_label=$1 + local cc_name=$2 + local cc_archive=$3 + + local cc_folder=$(dirname $cc_archive) + local archive_name=$(basename $cc_archive) + + push_fn "Packaging chaincode ${cc_label}" + + mkdir -p ${cc_folder} + + # Allow the user to override the service URL for the endpoint. This allows, for instance, + # local debugging at the 'host.docker.internal' DNS alias. 
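+  # An illustrative override for that debugging case:
+  #   TEST_NETWORK_CHAINCODE_ADDRESS="host.docker.internal:9999" ./network cc package ...
+  # Otherwise the default address resolves, via the {{.peername}} template, to
+  # the in-cluster Service created by launch_chaincode_service
+  # (e.g. org1-peer1-ccaas-<chaincode-name>:9999).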
+ local cc_default_address="{{.peername}}-ccaas-${cc_name}:9999" + local cc_address=${TEST_NETWORK_CHAINCODE_ADDRESS:-$cc_default_address} + + cat << EOF > ${cc_folder}/connection.json +{ + "address": "${cc_address}", + "dial_timeout": "10s", + "tls_required": false +} +EOF + + cat << EOF > ${cc_folder}/metadata.json +{ + "type": "ccaas", + "label": "${cc_label}" +} +EOF + + tar -C ${cc_folder} -zcf ${cc_folder}/code.tar.gz connection.json + tar -C ${cc_folder} -zcf ${cc_archive} code.tar.gz metadata.json + + rm ${cc_folder}/code.tar.gz + + pop_fn +} + +function launch_chaincode_service() { + local org=$1 + local peer=$2 + local cc_name=$3 + local cc_id=$4 + local cc_image=$5 + push_fn "Launching chaincode container \"${cc_image}\"" + + cat << EOF | envsubst | kubectl -n $NS apply -f - +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ${org}-${peer}-ccaas-${cc_name} +spec: + replicas: 1 + selector: + matchLabels: + app: ${org}-${peer}-ccaas-${cc_name} + template: + metadata: + labels: + app: ${org}-${peer}-ccaas-${cc_name} + spec: + containers: + - name: main + image: ${cc_image} + imagePullPolicy: IfNotPresent + env: + - name: CHAINCODE_SERVER_ADDRESS + value: 0.0.0.0:9999 + - name: CHAINCODE_ID + value: ${cc_id} + - name: CORE_CHAINCODE_ID_NAME + value: ${cc_id} + ports: + - containerPort: 9999 + +--- +apiVersion: v1 +kind: Service +metadata: + name: ${org}-${peer}-ccaas-${cc_name} +spec: + ports: + - name: chaincode + port: 9999 + protocol: TCP + selector: + app: ${org}-${peer}-ccaas-${cc_name} +EOF + + kubectl -n $NS rollout status deploy/${org}-${peer}-ccaas-${cc_name} + + pop_fn +} + +function launch_chaincode() { + local org=org1 + local cc_name=$1 + local cc_id=$2 + local cc_image=$3 + + launch_chaincode_service ${org} peer1 ${cc_name} ${cc_id} ${cc_image} +# launch_chaincode_service ${org} peer2 ${cc_name} ${cc_id} ${cc_image} +} + +function install_chaincode_for() { + local org=$1 + local peer=$2 + local cc_package=$3 + push_fn "Installing chaincode for org ${org} peer ${peer}" + + export_peer_context $org $peer + + peer lifecycle chaincode install $cc_package + + pop_fn +} + +# Package and install the chaincode, but do not activate. 
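+# Typical use (illustrative) when debugging a chaincode service out of band:
+#   ./network cc package  <label> <name> <package.tgz>
+#   ./network cc install  <package.tgz>
+#   ./network cc activate <name> <package.tgz>
+# i.e. the peer-side lifecycle can be completed after the external chaincode
+# process has been started.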
+function install_chaincode() { + local org=1 + local cc_package=$1 + + install_chaincode_for ${org} 1 ${cc_package} +# install_chaincode_for ${org} 2 ${cc_package} +} + +# approve the chaincode package for an org and assign a name +function approve_chaincode() { + local org=1 + local peer=1 + local cc_name=$1 + local cc_id=$2 + push_fn "Approving chaincode ${cc_name} with ID ${cc_id}" + + export_peer_context $org $peer + + peer lifecycle \ + chaincode approveformyorg \ + --channelID ${CHANNEL_NAME} \ + --name ${cc_name} \ + --version 1 \ + --package-id ${cc_id} \ + --sequence 1 \ + --orderer ${NS}-org0-orderersnode1-orderer.${DOMAIN}:443 \ + --tls --cafile ${TEMP_DIR}/channel-msp/ordererOrganizations/org0/orderers/org0-orderersnode1/tls/signcerts/tls-cert.pem + + pop_fn +} + +# commit the named chaincode for an org +function commit_chaincode() { + local org=1 + local peer=1 + local cc_name=$1 + push_fn "Committing chaincode ${cc_name}" + + export_peer_context $org $peer + + peer lifecycle \ + chaincode commit \ + --channelID ${CHANNEL_NAME} \ + --name ${cc_name} \ + --version 1 \ + --sequence 1 \ + --orderer ${NS}-org0-orderersnode1-orderer.${DOMAIN}:443 \ + --tls --cafile ${TEMP_DIR}/channel-msp/ordererOrganizations/org0/orderers/org0-orderersnode1/tls/signcerts/tls-cert.pem + + pop_fn +} + +function set_chaincode_id() { + local cc_package=$1 + + cc_sha256=$(shasum -a 256 ${cc_package} | tr -s ' ' | cut -d ' ' -f 1) + cc_label=$(tar zxfO ${cc_package} metadata.json | jq -r '.label') + + CHAINCODE_ID=${cc_label}:${cc_sha256} +} + +# chaincode "group" commands. Like "main" for chaincode sub-command group. +function chaincode_command_group() { + set -x + + COMMAND=$1 + shift + + if [ "${COMMAND}" == "deploy" ]; then + log "Deploying chaincode" + deploy_chaincode $@ + log "🏁 - Chaincode is ready." + + elif [ "${COMMAND}" == "activate" ]; then + log "Activating chaincode" + activate_chaincode $@ + log "🏁 - Chaincode is ready." + + elif [ "${COMMAND}" == "package" ]; then + log "Packaging chaincode" + package_chaincode $@ + log "🏁 - Chaincode package is ready." + + elif [ "${COMMAND}" == "id" ]; then + set_chaincode_id $@ + log $CHAINCODE_ID + + elif [ "${COMMAND}" == "launch" ]; then + log "Launching chaincode services" + launch_chaincode $@ + log "🏁 - Chaincode services are ready" + + elif [ "${COMMAND}" == "install" ]; then + log "Installing chaincode for org1" + install_chaincode $@ + log "🏁 - Chaincode is installed" + + elif [ "${COMMAND}" == "approve" ]; then + log "Approving chaincode for org1" + approve_chaincode $@ + log "🏁 - Chaincode is approved" + + elif [ "${COMMAND}" == "commit" ]; then + log "Committing chaincode for org1" + commit_chaincode $@ + log "🏁 - Chaincode is committed" + + elif [ "${COMMAND}" == "invoke" ]; then + invoke_chaincode $@ 2>> ${LOG_FILE} + + elif [ "${COMMAND}" == "query" ]; then + query_chaincode $@ >> ${LOG_FILE} + + elif [ "${COMMAND}" == "metadata" ]; then + query_chaincode_metadata $@ >> ${LOG_FILE} + + else + print_help + exit 1 + fi +} diff --git a/sample-network/scripts/channel.sh b/sample-network/scripts/channel.sh new file mode 100644 index 00000000..d2de348a --- /dev/null +++ b/sample-network/scripts/channel.sh @@ -0,0 +1,289 @@ +#!/bin/bash +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + # todo: Refuse to overwrite an existing admin enrollment ? + + +function channel_command_group() { + # set -x + + COMMAND=$1 + shift + + if [ "${COMMAND}" == "create" ]; then + log "Creating channel \"${CHANNEL_NAME}\":" + channel_up + log "🏁 - Channel is ready." + + else + print_help + exit 1 + fi +} + +function channel_up() { + set -x + + enroll_org_admins + + create_channel_msp + create_genesis_block + + join_channel_orderers + join_channel_peers +} + +# create an enrollment MSP config.yaml +function create_msp_config_yaml() { + local ca_name=$1 + local ca_cert_name=$2 + local msp_dir=$3 + echo "Creating msp config ${msp_dir}/config.yaml with cert ${ca_cert_name}" + + cat << EOF > ${msp_dir}/config.yaml +NodeOUs: + Enable: true + ClientOUIdentifier: + Certificate: cacerts/${ca_cert_name} + OrganizationalUnitIdentifier: client + PeerOUIdentifier: + Certificate: cacerts/${ca_cert_name} + OrganizationalUnitIdentifier: peer + AdminOUIdentifier: + Certificate: cacerts/${ca_cert_name} + OrganizationalUnitIdentifier: admin + OrdererOUIdentifier: + Certificate: cacerts/${ca_cert_name} + OrganizationalUnitIdentifier: orderer +EOF +} + +function get_connection_profile() { + local node_name=$1 + local connection_profile=$2 + + mkdir -p $(dirname ${connection_profile}) + + echo "writing $node_name connection profile to $connection_profile" + + kubectl -n $NS get cm/${node_name}-connection-profile -o json \ + | jq -r .binaryData.\"profile.json\" \ + | base64 -d \ + > ${connection_profile} +} + +function enroll_org_admin() { + local type=$1 + local org=$2 + local username=$3 + local password=$4 + + echo "Enrolling $type org admin $username" + + ENROLLMENTS_DIR=${TEMP_DIR}/enrollments + ORG_ADMIN_DIR=${ENROLLMENTS_DIR}/${org}/users/${username} + + # skip the enrollment if the admin certificate is available. + if [ -f "${ORG_ADMIN_DIR}/msp/keystore/key.pem" ]; then + echo "Found an existing admin enrollment at ${ORG_ADMIN_DIR}" + return + fi + + # Retrieve the CA information from Kubernetes + CA_NAME=${org}-ca + CA_DIR=${TEMP_DIR}/cas/${CA_NAME} + CONNECTION_PROFILE=${CA_DIR}/connection-profile.json + + get_connection_profile $CA_NAME $CONNECTION_PROFILE + + # extract the CA enrollment URL and tls cert from the org connection profile + CA_AUTH=${username}:${password} + CA_ENDPOINT=$(jq -r .endpoints.api $CONNECTION_PROFILE) + CA_HOST=$(echo ${CA_ENDPOINT} | cut -d/ -f3 | tr ':' '\n' | head -1) + CA_PORT=$(echo ${CA_ENDPOINT} | cut -d/ -f3 | tr ':' '\n' | tail -1) + CA_URL=https://${CA_AUTH}@${CA_HOST}:${CA_PORT} + + jq -r .tls.cert $CONNECTION_PROFILE | base64 -d >& $CA_DIR/tls-cert.pem + + # enroll the admin user + FABRIC_CA_CLIENT_HOME=${ORG_ADMIN_DIR} fabric-ca-client enroll --url ${CA_URL} --tls.certfiles ${CA_DIR}/tls-cert.pem + + # Construct an msp config.yaml + CA_CERT_NAME=${NS}-${CA_NAME}-ca-$(echo $DOMAIN | tr -s . -)-${CA_PORT}.pem + + create_msp_config_yaml ${CA_NAME} ${CA_CERT_NAME} ${ORG_ADMIN_DIR}/msp + + # private keys are hashed by name, but we only support one enrollment. + # test-network examples refer to this as "server.key", which is incorrect. 
+ # This is the private key used to endorse transactions using the admin's + # public key. + mv ${ORG_ADMIN_DIR}/msp/keystore/*_sk ${ORG_ADMIN_DIR}/msp/keystore/key.pem + + + # enroll the admin user at the TLS CA - used for the channel admin API + FABRIC_CA_CLIENT_HOME=${ORG_ADMIN_DIR} \ + fabric-ca-client enroll \ + --url ${CA_URL} \ + --tls.certfiles ${CA_DIR}/tls-cert.pem \ + --mspdir ${ORG_ADMIN_DIR}/tls \ + --caname tlsca + + mv ${ORG_ADMIN_DIR}/tls/keystore/*_sk ${ORG_ADMIN_DIR}/tls/keystore/key.pem +} + +function enroll_org_admins() { + push_fn "Enrolling org admin users" + + enroll_org_admin orderer org0 org0admin org0adminpw + enroll_org_admin peer org1 org1admin org1adminpw + enroll_org_admin peer org2 org2admin org2adminpw + + pop_fn +} + +function create_channel_org_msp() { + local type=$1 + local org=$2 + echo "Creating channel org $org MSP" + + CA_DIR=${TEMP_DIR}/cas/${org}-ca + ORG_MSP_DIR=${TEMP_DIR}/channel-msp/${type}Organizations/${org}/msp + + mkdir -p ${ORG_MSP_DIR}/cacerts + mkdir -p ${ORG_MSP_DIR}/tlscacerts + + jq -r .ca.signcerts ${CA_DIR}/connection-profile.json | base64 -d >& ${ORG_MSP_DIR}/cacerts/ca-signcert.pem + jq -r .tlsca.signcerts ${CA_DIR}/connection-profile.json | base64 -d >& ${ORG_MSP_DIR}/tlscacerts/tlsca-signcert.pem + + create_msp_config_yaml ${org}-ca ca-signcert.pem ${ORG_MSP_DIR} +} + +function create_channel_msp() { + push_fn "Creating channel MSP" + + create_channel_org_msp orderer org0 + create_channel_org_msp peer org1 + create_channel_org_msp peer org2 + + extract_orderer_tls_cert org0 orderersnode1 + extract_orderer_tls_cert org0 orderersnode2 + extract_orderer_tls_cert org0 orderersnode3 + + pop_fn +} + +function extract_orderer_tls_cert() { + local org=$1 + local orderer=$2 + + echo "Extracting TLS cert for $org $orderer" + + ORDERER_NAME=${org}-${orderer} + ORDERER_DIR=${TEMP_DIR}/channel-msp/ordererOrganizations/${org}/orderers/${ORDERER_NAME} + ORDERER_TLS_DIR=${ORDERER_DIR}/tls + CONNECTION_PROFILE=${ORDERER_DIR}/connection-profile.json + + get_connection_profile $ORDERER_NAME $CONNECTION_PROFILE + + mkdir -p $ORDERER_TLS_DIR/signcerts + + jq -r .tls.signcerts ${CONNECTION_PROFILE} \ + | base64 -d \ + >& $ORDERER_TLS_DIR/signcerts/tls-cert.pem +} + +function create_genesis_block() { + push_fn "Creating channel genesis block" + + mkdir -p ${TEMP_DIR}/config + cp ${PWD}/config/core.yaml ${TEMP_DIR}/config/ + + # The channel configtx file needs to specify dynamic elements from the environment, + # for instance, the ${DOMAIN} for ingress controller and service endpoints. + cat ${PWD}/config/configtx-template.yaml | envsubst > ${TEMP_DIR}/config/configtx.yaml + + FABRIC_CFG_PATH=${TEMP_DIR}/config \ + configtxgen \ + -profile TwoOrgsApplicationGenesis \ + -channelID $CHANNEL_NAME \ + -outputBlock ${TEMP_DIR}/genesis_block.pb + +# configtxgen -inspectBlock ${TEMP_DIR}/genesis_block.pb + + pop_fn +} + +function join_channel_orderers() { + push_fn "Joining orderers to channel ${CHANNEL_NAME}" + + join_channel_orderer org0 orderersnode1 + join_channel_orderer org0 orderersnode2 + join_channel_orderer org0 orderersnode3 + + # todo: readiness / liveiness equivalent for channel? Needs a little bit to settle before peers can join. + sleep 10 + + pop_fn +} + +# Request from the channel ADMIN api that the orderer joins the target channel +function join_channel_orderer() { + local org=$1 + local orderer=$2 + + # The client certificate presented in this case is the admin USER TLS enrollment key. 
This is a stronger assertion + # of identity than the Docker Compose network, which transmits the orderer NODE TLS key pair directly + + osnadmin channel join \ + --orderer-address ${NS}-${org}-${orderer}-admin.${DOMAIN} \ + --ca-file ${TEMP_DIR}/channel-msp/ordererOrganizations/${org}/orderers/${org}-${orderer}/tls/signcerts/tls-cert.pem \ + --client-cert ${TEMP_DIR}/enrollments/${org}/users/${org}admin/tls/signcerts/cert.pem \ + --client-key ${TEMP_DIR}/enrollments/${org}/users/${org}admin/tls/keystore/key.pem \ + --channelID ${CHANNEL_NAME} \ + --config-block ${TEMP_DIR}/genesis_block.pb +} + +function join_channel_peers() { + join_org_peers 1 + join_org_peers 2 +} + +function join_org_peers() { + local orgnum=$1 + push_fn "Joining org${orgnum} peers to channel ${CHANNEL_NAME}" + + # Join peers to channel + join_channel_peer $orgnum 1 + #join_channel_peer $orgnum 2 + + pop_fn +} + +function join_channel_peer() { + local orgnum=$1 + local peernum=$2 + + export_peer_context $orgnum $peernum + + peer channel join \ + --blockpath ${TEMP_DIR}/genesis_block.pb \ + --orderer ${NS}-org0-orderersnode1-orderer.${DOMAIN} \ + --tls \ + --cafile ${TEMP_DIR}/channel-msp/ordererOrganizations/org0/orderers/org0-orderersnode1/tls/signcerts/tls-cert.pem +} diff --git a/sample-network/scripts/cluster.sh b/sample-network/scripts/cluster.sh new file mode 100644 index 00000000..73f3b7e2 --- /dev/null +++ b/sample-network/scripts/cluster.sh @@ -0,0 +1,156 @@ +#!/bin/bash +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# cluster "group" commands. 
Like "main" for the fabric-cli "cluster" sub-command +function cluster_command_group() { + + # Default COMMAND is 'init' if not specified + if [ "$#" -eq 0 ]; then + COMMAND="init" + + else + COMMAND=$1 + shift + fi + + if [ "${COMMAND}" == "init" ]; then + log "Initializing K8s cluster" + cluster_init + log "🏁 - Cluster is ready" + + elif [ "${COMMAND}" == "clean" ]; then + log "Cleaning k8s cluster" + cluster_clean + log "🏁 - Cluster is cleaned" + + elif [ "${COMMAND}" == "load-images" ]; then + log "Loading Docker images" + pull_docker_images + + if [ "${CLUSTER_RUNTIME}" == "kind" ]; then + kind_load_images + fi + + log "🏁 - Images are ready" + + else + print_help + exit 1 + fi +} + +function pull_docker_images() { + push_fn "Pulling docker images for Fabric ${FABRIC_VERSION}" + + $CONTAINER_CLI pull ${CONTAINER_NAMESPACE} $FABRIC_OPERATOR_IMAGE + $CONTAINER_CLI pull ${CONTAINER_NAMESPACE} $FABRIC_CONSOLE_IMAGE + $CONTAINER_CLI pull ${CONTAINER_NAMESPACE} $FABRIC_DEPLOYER_IMAGE + $CONTAINER_CLI pull ${CONTAINER_NAMESPACE} $FABRIC_CA_IMAGE + $CONTAINER_CLI pull ${CONTAINER_NAMESPACE} $FABRIC_PEER_IMAGE + $CONTAINER_CLI pull ${CONTAINER_NAMESPACE} $FABRIC_ORDERER_IMAGE + $CONTAINER_CLI pull ${CONTAINER_NAMESPACE} $INIT_IMAGE + $CONTAINER_CLI pull ${CONTAINER_NAMESPACE} $COUCHDB_IMAGE + $CONTAINER_CLI pull ${CONTAINER_NAMESPACE} $GRPCWEB_IMAGE + + pop_fn +} + +function kind_load_images() { + push_fn "Loading docker images to KIND control plane" + + kind load docker-image $FABRIC_OPERATOR_IMAGE + kind load docker-image $FABRIC_CONSOLE_IMAGE + kind load docker-image $FABRIC_DEPLOYER_IMAGE + kind load docker-image $FABRIC_CA_IMAGE + kind load docker-image $FABRIC_PEER_IMAGE + kind load docker-image $FABRIC_ORDERER_IMAGE + kind load docker-image $INIT_IMAGE + kind load docker-image $COUCHDB_IMAGE + kind load docker-image $GRPCWEB_IMAGE + + pop_fn +} + +function cluster_init() { + apply_fabric_crds + apply_nginx_ingress + + if [ "${STAGE_DOCKER_IMAGES}" == true ]; then + pull_docker_images + kind_load_images + fi + + wait_for_nginx_ingress +} + +function apply_fabric_crds() { + push_fn "Applying Fabric CRDs" + + $KUSTOMIZE_BUILD ../config/crd | kubectl apply -f - + + pop_fn +} + +function delete_fabric_crds() { + push_fn "Deleting Fabric CRDs" + + $KUSTOMIZE_BUILD ../config/crd | kubectl delete -f - + + pop_fn +} + +function apply_nginx_ingress() { + push_fn "Applying ingress controller" + + $KUSTOMIZE_BUILD ../config/ingress/${CLUSTER_RUNTIME} | kubectl apply -f - + + sleep 5 + + pop_fn +} + +function delete_nginx_ingress() { + push_fn "Deleting ${CLUSTER_RUNTIME} ingress controller" + + $KUSTOMIZE_BUILD ../config/ingress/${CLUSTER_RUNTIME} | kubectl delete -f - + + pop_fn +} + +function wait_for_nginx_ingress() { + push_fn "Waiting for ingress controller" + + kubectl wait --namespace ingress-nginx \ + --for=condition=ready pod \ + --selector=app.kubernetes.io/component=controller \ + --timeout=2m + + pop_fn +} + +function cluster_clean() { + delete_fabric_crds + delete_nginx_ingress +} + + + + + + diff --git a/sample-network/scripts/console.sh b/sample-network/scripts/console.sh new file mode 100644 index 00000000..4d696229 --- /dev/null +++ b/sample-network/scripts/console.sh @@ -0,0 +1,49 @@ +#!/bin/bash +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +function console_up() { + + init_namespace + + apply_operator + wait_for_deployment fabric-operator + + apply_console + wait_for_deployment hlf-console + + local console_hostname=${NS}-hlf-console-console + local console_url="https://${CONSOLE_USERNAME}:${CONSOLE_PASSWORD}@${console_hostname}.${CONSOLE_DOMAIN}" + + log "" + log "The Fabric Operations Console is available at ${console_url}" + log "" + + # TODO: prepare an FoC bulk JSON import for the test network assets + # log "Log into Console and import the asset archive at build/console/console_assets.zip" +} + +function apply_console() { + push_fn "Applying Fabric Operations Console" + + apply_kustomization config/console + + sleep 5 + + pop_fn +} \ No newline at end of file diff --git a/sample-network/scripts/kind.sh b/sample-network/scripts/kind.sh new file mode 100644 index 00000000..840ee048 --- /dev/null +++ b/sample-network/scripts/kind.sh @@ -0,0 +1,124 @@ +#!/bin/bash +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +function kind_create() { + push_fn "Creating cluster \"${CLUSTER_NAME}\"" + + # prevent the next kind cluster from using the previous Fabric network's enrollments. + rm -rf $PWD/temp + + # todo: always delete? Maybe return no-op if the cluster already exists? + kind delete cluster --name $CLUSTER_NAME + + local reg_name=${LOCAL_REGISTRY_NAME} + local reg_port=${LOCAL_REGISTRY_PORT} + local ingress_http_port=${NGINX_HTTP_PORT} + local ingress_https_port=${NGINX_HTTPS_PORT} + + # the 'ipvs'proxy mode permits better HA abilities + + cat </dev/null || true)" + if [ "${running}" != 'true' ]; then + docker run \ + -d --restart=always -p "127.0.0.1:${reg_port}:5000" --name "${reg_name}" \ + registry:2 + fi + + # connect the registry to the cluster network + # (the network may already be connected) + docker network connect "kind" "${reg_name}" || true + + # Document the local registry + # https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry + cat < /dev/null + if [[ $? -ne 0 ]]; then + echo "No '${CONTAINER_CLI}' binary available?" + exit 1 + fi + + if [ "${CLUSTER_RUNTIME}" == "kind" ]; then + kind version > /dev/null + if [[ $? -ne 0 ]]; then + echo "No 'kind' binary available? (https://kind.sigs.k8s.io/docs/user/quick-start/#installation)" + exit 1 + fi + fi + + kubectl > /dev/null + if [[ $? -ne 0 ]]; then + echo "No 'kubectl' binary available? 
(https://kubernetes.io/docs/tasks/tools/)"
+    exit 1
+  fi
+
+  jq --version > /dev/null
+  if [[ $? -ne 0 ]]; then
+    echo "No 'jq' binary available? (https://stedolan.github.io/jq/)"
+    exit 1
+  fi
+
+  # Use the local fabric binaries if available. If not, go get them.
+  bin/peer version &> /dev/null
+  if [[ $? -ne 0 ]]; then
+    echo "Downloading LATEST Fabric binaries and config"
+    curl -sSL https://raw.githubusercontent.com/hyperledger/fabric/main/scripts/bootstrap.sh | bash -s -- -s -d
+
+    # remove sample config files extracted by the installation script
+    rm config/configtx.yaml
+    #rm config/core.yaml
+    rm config/orderer.yaml
+  fi
+
+  export PATH=bin:$PATH
+
+  # Double-check that the binary transfer was OK
+  peer version > /dev/null
+  if [[ $? -ne 0 ]]; then
+    log "No 'peer' binary available?"
+    exit 1
+  fi
+
+  set -e
+}
\ No newline at end of file
diff --git a/sample-network/scripts/test_network.sh b/sample-network/scripts/test_network.sh
new file mode 100644
index 00000000..e065e43d
--- /dev/null
+++ b/sample-network/scripts/test_network.sh
@@ -0,0 +1,153 @@
+#!/bin/bash
+#
+# Copyright contributors to the Hyperledger Fabric Operator project
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+function apply_operator() {
+
+  apply_kustomization config/rbac
+  apply_kustomization config/manager
+
+  sleep 2
+}
+
+function network_up() {
+
+  init_namespace
+
+  apply_operator
+  wait_for_deployment fabric-operator
+
+  launch_network_CAs
+
+  apply_network_peers
+  apply_network_orderers
+
+  wait_for ibppeer org1-peer1
+  wait_for ibppeer org1-peer2
+  wait_for ibppeer org2-peer1
+  wait_for ibppeer org2-peer2
+
+  wait_for ibporderer org0-orderersnode1
+  wait_for ibporderer org0-orderersnode2
+  wait_for ibporderer org0-orderersnode3
+}
+
+function init_namespace() {
+  push_fn "Creating namespace \"$NS\""
+
+  cat << EOF | kubectl apply -f -
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: ${NS}
+EOF
+
+  pop_fn
+}
+
+function delete_namespace() {
+  push_fn "Deleting namespace \"$NS\""
+
+  kubectl delete namespace $NS --ignore-not-found
+
+  pop_fn
+}
+
+function wait_for() {
+  local type=$1
+  local name=$2
+
+  # wait for the operator to reconcile the CRD with a Deployment
+  kubectl -n $NS wait $type $name --for jsonpath='{.status.type}'=Deployed --timeout=60s
+
+  # wait for the deployment to reach Ready
+  kubectl -n $NS rollout status deploy $name
+}
+
+function launch_network_CAs() {
+  push_fn "Launching Fabric CAs"
+
+  apply_kustomization config/cas
+
+  # give the operator a chance to run the first reconciliation on the new resource
+  sleep 1
+
+  wait_for ibpca org0-ca
+  wait_for ibpca org1-ca
+  wait_for ibpca org2-ca
+
+  # load CA TLS certificates into the env, for substitution into the peer and orderer CRDs
+  export ORG0_CA_CERT=$(kubectl -n $NS get cm/org0-ca-connection-profile -o json | jq -r .binaryData.\"profile.json\" | base64 -d | jq -r .tls.cert)
+  export ORG1_CA_CERT=$(kubectl -n $NS get cm/org1-ca-connection-profile -o json | jq -r .binaryData.\"profile.json\" | base64 -d | jq -r .tls.cert)
+  export ORG2_CA_CERT=$(kubectl -n $NS get cm/org2-ca-connection-profile -o json | jq -r .binaryData.\"profile.json\" | base64 -d | jq -r .tls.cert)
+
+  pop_fn
+}
+
+function apply_network_peers() {
+  push_fn "Launching Fabric Peers"
+
+  apply_kustomization config/peers
+
+  # give the operator a chance to run the first reconciliation on the new resource
+  sleep 1
+
+  pop_fn
+}
+
+function apply_network_orderers() {
+  push_fn "Launching Fabric Orderers"
+
+  apply_kustomization config/orderers
+
+  # give the operator a chance to run the first reconciliation on the new resource
+  sleep 1
+
+  pop_fn
+}
+
+function stop_services() {
+  push_fn "Stopping Fabric Services"
+
+  undo_kustomization config/consoles
+  undo_kustomization config/cas
+  undo_kustomization config/peers
+  undo_kustomization config/orderers
+
+  # give the operator a chance to reconcile the deletion and then shut down the operator.
+  sleep 10
+
+  undo_kustomization config/manager
+
+  # scrub any residual bits
+  kubectl -n $NS delete deployment --all
+  kubectl -n $NS delete pod --all
+  kubectl -n $NS delete service --all
+  kubectl -n $NS delete configmap --all
+  kubectl -n $NS delete ingress --all
+  kubectl -n $NS delete secret --all
+
+  pop_fn
+}
+
+function network_down() {
+  stop_services
+  delete_namespace
+
+  rm -rf $PWD/temp
+}
diff --git a/sample-network/scripts/utils.sh b/sample-network/scripts/utils.sh
new file mode 100644
index 00000000..25c307d8
--- /dev/null
+++ b/sample-network/scripts/utils.sh
@@ -0,0 +1,156 @@
+#!/bin/bash
+#
+# Copyright contributors to the Hyperledger Fabric Operator project
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+function logging_init() {
+  # Reset the output and debug log files
+  printf '' > ${LOG_FILE} > ${DEBUG_FILE}
+
+  # Write all output to the control flow log to STDOUT
+  tail -f ${LOG_FILE} &
+
+  # Call the exit handler when we exit.
+  trap "exit_fn" EXIT
+
+  # Send stdout and stderr from child programs to the debug log file
+  exec 1>>${DEBUG_FILE} 2>>${DEBUG_FILE}
+
+  # There can be a race between the tail starting and the next log statement
+  sleep 0.5
+}
+
+function exit_fn() {
+  rc=$?
+  set +x
+
+  # Write an error icon to the current logging statement.
+  if [ "0" -ne $rc ]; then
+    pop_fn $rc
+  fi
+
+  # always remove the log trailer when the process exits.
+  pkill -P $$
+}
+
+function push_fn() {
+  #echo -ne " - entering ${FUNCNAME[1]} with arguments $@"
+
+  echo -ne " - $@ ..." >> ${LOG_FILE}
+}
+
+function log() {
+  echo -e $@ >> ${LOG_FILE}
+}
+
+function pop_fn() {
+# echo exiting ${FUNCNAME[1]}
+
+  if [ $# -eq 0 ]; then
+    echo -ne "\r✅" >> ${LOG_FILE}
+    echo "" >> ${LOG_FILE}
+    return
+  fi
+
+  local res=$1
+  if [ $res -eq 0 ]; then
+    echo -ne "\r✅\n" >> ${LOG_FILE}
+
+  elif [ $res -eq 1 ]; then
+    echo -ne "\r⚠️\n" >> ${LOG_FILE}
+
+  elif [ $res -eq 2 ]; then
+    echo -ne "\r☠️\n" >> ${LOG_FILE}
+
+  elif [ $res -eq 127 ]; then
+    echo -ne "\r☠️\n" >> ${LOG_FILE}
+
+  else
+    echo -ne "\r\n" >> ${LOG_FILE}
+  fi
+
+  if [ $res -ne 0 ]; then
+    tail -${LOG_ERROR_LINES} network-debug.log >> ${LOG_FILE}
+  fi
+
+  #echo "" >> ${LOG_FILE}
+}
+
+function wait_for_deployment() {
+  local name=$1
+  push_fn "Waiting for deployment $name"
+
+  kubectl -n $NS rollout status deploy $name
+
+  pop_fn
+}
+
+function absolute_path() {
+  local relative_path=$1
+
+  local abspath="$( cd "${relative_path}" && pwd )"
+
+  echo $abspath
+}
+
+function apply_kustomization() {
+  $KUSTOMIZE_BUILD $1 | envsubst | kubectl -n $NS apply -f -
+}
+
+function undo_kustomization() {
+  $KUSTOMIZE_BUILD $1 | envsubst | kubectl -n $NS delete --ignore-not-found=true -f -
+}
+
+function create_image_pull_secret() {
+  local secret=$1
+  local registry=$2
+  local username=$3
+  local password=$4
+
+  push_fn "Creating $secret for access to $registry"
+
+  kubectl -n $NS delete secret $secret --ignore-not-found
+
+  # todo: can this be moved to a kustomization overlay?
+  kubectl -n $NS \
+    create secret docker-registry \
+    $secret \
+    --docker-server="$registry" \
+    --docker-username="$username" \
+    --docker-password="$password"
+
+  pop_fn
}
+
+function export_peer_context() {
+  local orgnum=$1
+  local peernum=$2
+  local org=org${orgnum}
+  local peer=peer${peernum}
+
+# export FABRIC_LOGGING_SPEC=DEBUG
+
+  export FABRIC_CFG_PATH=${PWD}/config
+  export CORE_PEER_ADDRESS=${NS}-${org}-${peer}-peer.${DOMAIN}:443
+  export CORE_PEER_LOCALMSPID=Org${orgnum}MSP
+  export CORE_PEER_TLS_ENABLED=true
+  export CORE_PEER_MSPCONFIGPATH=${TEMP_DIR}/enrollments/${org}/users/${org}admin/msp
+  export CORE_PEER_TLS_ROOTCERT_FILE=${TEMP_DIR}/channel-msp/peerOrganizations/${org}/msp/tlscacerts/tlsca-signcert.pem
+
+# export | egrep "CORE_|FABRIC_"
+}
diff --git a/scripts/check-license.sh b/scripts/check-license.sh
new file mode 100755
index 00000000..56b960a1
--- /dev/null
+++ b/scripts/check-license.sh
@@ -0,0 +1,140 @@
+#!/bin/bash
+#
+# Copyright contributors to the Hyperledger Fabric Operator project
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+cat << EOB > golang_copyright.txt
+/*
+ * Copyright contributors to the Hyperledger Fabric Operator project
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+EOB
+
+cat << EOB > shell_copyright.txt
+#
+# Copyright contributors to the Hyperledger Fabric Operator project
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+EOB
+
+function filterGeneratedFiles {
+  for f in $@; do
+    head -n5 $f | grep -qE 'Code generated by.*DO NOT EDIT' || echo $f
+  done
+}
+
+function filterExcludedFiles {
+  CHECK=`echo "$CHECK" \
+    | grep -v "^\.build/" \
+    | grep -v "^\.git/" \
+    | grep -v "^\.gitignore" \
+    | grep -v "\.json$" \
+    | grep -v "\.pem$" \
+    | grep -v "\.crt$" \
+    | grep -v "\.txt$" \
+    | grep -v "\.md$" \
+    | grep -v "_sk$" \
+    | grep -v "\.key$" \
+    | grep -v "\.gen\.go$" \
+    | grep -v "tools/" \
+    | grep -v "testdata/" \
+    | grep -v "vendor/" \
+    | grep -v "go.mod" \
+    | grep -v "go.sum" \
+    | grep -v .secrets.baseline \
+    | grep -v .pre-commit-config.yaml \
+    | grep -v .ibp.com_*.yaml \
+    | grep -v .deepcopy.go \
+    | sort -u`
+
+  CHECK=$(filterGeneratedFiles "$CHECK")
+}
+
+CHECK=$(git diff --name-only --diff-filter=ACMRTUXB HEAD)
+filterExcludedFiles
+if [[ -z "$CHECK" ]]; then
+  CHECK=$(git diff-tree --no-commit-id --name-only --diff-filter=ACMRTUXB -r "HEAD^..HEAD")
+  filterExcludedFiles
+fi
+
+if [[ -z "$CHECK" ]]; then
+  echo "All files are excluded from having license headers"
+  exit 0
+fi
+
+missing=`echo "$CHECK" | xargs ls -d 2>/dev/null | xargs grep -L "SPDX-License-Identifier: Apache-2.0"`
+if [[ -z "$missing" ]]; then
+  echo "All files have SPDX-License-Identifier: Apache-2.0"
+  exit 0
+fi
+
+TMPFILE="./tmpfile"
+
+for FILE in ${missing}; do
+  EXT="${FILE##*.}"
+  echo "Adding copyright notice to $FILE"
+  if [ "${EXT}" = "go" ]; then
+    cat golang_copyright.txt ${FILE} > ${TMPFILE}
+    cat ${TMPFILE} > ${FILE}
+    rm -f ${TMPFILE}
+    echo " ${FILE} copyright notice added"
+  elif [ "${EXT}" = "yaml" ]; then
+    cat shell_copyright.txt ${FILE} > ${TMPFILE}
+    cat ${TMPFILE} > ${FILE}
+    rm -f ${TMPFILE}
+    echo " ${FILE} copyright notice added"
+  elif [ "${EXT}" = "sh" ]; then
+    cat shell_copyright.txt ${FILE} > ${TMPFILE}
+    cat ${TMPFILE} > ${FILE}
+    rm -f ${TMPFILE}
+    echo " ${FILE} copyright notice added"
+  else
+    echo "invalid file extension"
+  fi
+done
+
+rm golang_copyright.txt shell_copyright.txt
+
+exit 0
\ No newline at end of file
diff --git a/scripts/checks.sh b/scripts/checks.sh
new file mode 100755
index 00000000..eb547d27
--- /dev/null
+++ b/scripts/checks.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+#
+# Copyright contributors to the Hyperledger Fabric Operator project
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Need to run this before go vet
+go mod download
+
+echo "Running 'go vet'"
+OUTPUT=`go vet -all ./... 2>&1`
+if [ -n "$OUTPUT" ]; then
+  echo "The following files contain go vet errors"
+  echo $OUTPUT
+  exit 1
+fi
+echo "No 'go vet' issues found"
+
+cd /tmp
+go install golang.org/x/tools/cmd/goimports@ff88973b1e4e
+cd -
+echo "Checking imports ..."
+found=`goimports -l \`find . -path ./vendor -prune -o -name "*.go" -print\` 2>&1`
+found=$(echo "$found" | grep -v generated)
+if [ "$found" != "" ]; then
+  echo "The following files have import problems:"
+  echo "$found"
+  echo "You may run 'goimports -w <file>' to fix each file."
+  exit 1
+fi
+echo "All files are properly formatted"
\ No newline at end of file
diff --git a/scripts/download_binaries.sh b/scripts/download_binaries.sh
new file mode 100755
index 00000000..16cfe082
--- /dev/null
+++ b/scripts/download_binaries.sh
@@ -0,0 +1,25 @@
+#!/bin/bash -e
+#
+# Copyright contributors to the Hyperledger Fabric Operator project
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+if [ ! -f ${PWD}/bin/fabric-ca-client ] || [ ! -f ${PWD}/bin/peer ] ; then
+  echo -e "\n\n======= Downloading Fabric & Fabric-CA Binaries =========\n"
+  curl -sSL http://bit.ly/2ysbOFE | bash -s ${FABRIC_VERSION} ${FABRIC_CA_VERSION} -d -s
+else
+  echo -e "\n\n======= Fabric Binaries already exist, skipping download =========\n"
+fi
diff --git a/scripts/go-sec.sh b/scripts/go-sec.sh
new file mode 100755
index 00000000..a7555226
--- /dev/null
+++ b/scripts/go-sec.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+#
+# Copyright contributors to the Hyperledger Fabric Operator project
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+RELEASE=$(curl -s -H "Accept: application/vnd.github.v3+json" https://api.github.com/repos/securego/gosec/releases/latest | jq -r .tag_name)
+
+echo "Latest Gosec release determined to be $RELEASE... Installing..."
+
+curl -sfL https://raw.githubusercontent.com/securego/gosec/master/install.sh | sh -s -- -b $(go env GOPATH)/bin $RELEASE
+
+gosec ./...
\ No newline at end of file
diff --git a/scripts/install-tools.sh b/scripts/install-tools.sh
new file mode 100755
index 00000000..8949ef44
--- /dev/null
+++ b/scripts/install-tools.sh
@@ -0,0 +1,41 @@
+#!/bin/bash -e
+
+#
+# Copyright contributors to the Hyperledger Fabric Operator project
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# cd /tmp
+# go install golang.org/x/tools/cmd/goimports@latest
+# curl -s "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" | bash
+# sudo mv kustomize /usr/local/bin
+# go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.8.0
+# cd -
+
+## getOperatorSDK
+sudo rm /usr/local/bin/operator-sdk || true
+
+sdkVersion="1.16.0"
+sdkName="operator-sdk"
+
+url="https://github.com/operator-framework/operator-sdk/releases/download/v${sdkVersion}/operator-sdk_linux_amd64"
+echo "Installing operator-sdk version $sdkVersion as $sdkName"
+wget --quiet --progress=dot:giga -t 2 -T 60 -O $sdkName $url || true
+sudo mkdir -p /usr/local/bin/
+chmod +x $sdkName
+./$sdkName version
+sudo mv $sdkName /usr/local/bin
+operator-sdk version
\ No newline at end of file
diff --git a/scripts/run-unit-tests.sh b/scripts/run-unit-tests.sh
new file mode 100755
index 00000000..58228b2c
--- /dev/null
+++ b/scripts/run-unit-tests.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+#
+# Copyright contributors to the Hyperledger Fabric Operator project
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+echo "Running unit tests..."
+
+export PATH=$PATH:$GOPATH/bin
+
+# List of packages to not run test for
+EXCLUDED_PKGS=(
+  "/mocks"
+  "/manager$"
+  "/manager/resources$"
+  "/apis"
+  "/controller$"
+  "/controllers$"
+  "ibp-operator/config$"
+  "/integration"
+)
+
+PKGS=`go list ./... | grep -v -f <(printf '%s\n' "${EXCLUDED_PKGS[@]}")`
+
+go test $PKGS -cover
+exit $?
diff --git a/testdata/deploy/ca/adminsecret.yaml b/testdata/deploy/ca/adminsecret.yaml
new file mode 100644
index 00000000..176fbc6b
--- /dev/null
+++ b/testdata/deploy/ca/adminsecret.yaml
@@ -0,0 +1,26 @@
+#
+# Copyright contributors to the Hyperledger Fabric Operator project
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: v1 +kind: Secret +metadata: + name: "ca1-admin-secret" +type: Opaque +data: + ca-admin-name: YWRtaW4= + ca-admin-password: MWYyZDFlMmU2N2Rm diff --git a/testdata/deploy/ca/tlssecret.yaml b/testdata/deploy/ca/tlssecret.yaml new file mode 100644 index 00000000..60e807fd --- /dev/null +++ b/testdata/deploy/ca/tlssecret.yaml @@ -0,0 +1,27 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: v1 +kind: Secret +metadata: + name: tlssecret +type: kubernetes.io/tls +data: + tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURpekNDQW5PZ0F3SUJBZ0lKQU85SG84T1BGS2xtTUEwR0NTcUdTSWIzRFFFQkN3VUFNRkV4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpPUXpFaE1COEdBMVVFQ2d3WVNXNTBaWEp1WlhRZ1YybGtaMmwwY3lCUQpkSGtnVEhSa01SSXdFQVlEVlFRRERBbHNiMk5oYkdodmMzUXdIaGNOTVRrd05ESXlNVGN5TVRRd1doY05NakF3Ck5ESXhNVGN5TVRRd1dqQlJNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0F3Q1RrTXhJVEFmQmdOVkJBb00KR0VsdWRHVnlibVYwSUZkcFpHZHBkSE1nVUhSNUlFeDBaREVTTUJBR0ExVUVBd3dKYkc5allXeG9iM04wTUlJQgpJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBMzA2cU5XT0d4T1N1OUVyNHRCVUpUSVN5ClhxUjJ1NzBnSW5BVzhUN0FaWUxKc1BLSjdDZklWSmpnbkxuTmFTbWNvZVpOVHlqK3VVdENVOWE2aUg3clBsVDYKOGlDTDdNekhRT2R1eDFxYzk5NDNoV1JKMS9EQWVMZTlUcmRkZU83eFNqenpZZWRVSHFReWZHVmhyMjdyclVLbApIVXYraDlDa2JwTXFJUytMdm9Yci9mTWVKVGFpbHdEMk9kTDU3UkdDVndTZE1iVnN0OTNkN3YweFBpQjdFcmczCjM3aS9tOUJVc3hQV2NmbW5hbEMxZ1NUYU1XQ1ZnMis0bGVzL2JMWjhsUjJNR1VZbnVLeEhGSUkwenZZTHNGYm0Kb09CaDhkWXJvVUZ6V1FpM1dONUQyVTlYamR6YUxXeTZBVmFqM0txblU5ZWd2eVFvUm90SWg4a3lOeGFqdXdJRApBUUFCbzJZd1pEQWRCZ05WSFE0RUZnUVVWQ3FZY0wwc09Obk1PZUxmU0lmM3d1VkNxQTR3SHdZRFZSMGpCQmd3CkZvQVVWQ3FZY0wwc09Obk1PZUxmU0lmM3d1VkNxQTR3REFZRFZSMFRCQVV3QXdFQi96QVVCZ05WSFJFRURUQUwKZ2dsc2IyTmhiR2h2YzNRd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFDR3kzcVVnTXJGbzgzb0M4RWpKci9WYgpramF2TEEvTmd0eEFsaTJtWUxPQzBhcS9LMHhGRVFmWU1IK3Z5b20vWUg2ajQ3OWdWeHFWUVpYM0JhVW1DVFI1CjVyMGNrOC9Ga0Z6elo1VUhiVXpjblJMSFVkZWswdjBWa2p2RnJocXBPcGtYWE5HQUgxSmlXbHlkQjRRdmNXSXIKSmxkM29zcFh5L1ZwaTNSdnhQZGpiUjNNOU8zQVNFc2JQdldYNEw3SFFQYlFBNGVQWFd0NE1WMU0rblphOE9mUAo3SWZTTWRkdDliM3g0cHZQVElGZUJQTnptNWVTdjF1TUhGRmkrN1R2V3QwR0Z1cS9jZUZaN2pVVGN2eVlYeDJaClVuY0MvdFpFaDA4a2YzUkQ0NWd6TFZyQkpBaURzSXBDQ2pTendZT3o4N3dkZ25RaEoxMGtZb29xVjlNNU5iQT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + tls.key: 
LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV3QUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktvd2dnU21BZ0VBQW9JQkFRRGZUcW8xWTRiRTVLNzAKU3ZpMEZRbE1oTEplcEhhN3ZTQWljQmJ4UHNCbGdzbXc4b25zSjhoVW1PQ2N1YzFwS1p5aDVrMVBLUDY1UzBKVAoxcnFJZnVzK1ZQcnlJSXZzek1kQTUyN0hXcHozM2plRlpFblg4TUI0dDcxT3QxMTQ3dkZLUFBOaDUxUWVwREo4ClpXR3ZidXV0UXFVZFMvNkgwS1J1a3lvaEw0dStoZXY5OHg0bE5xS1hBUFk1MHZudEVZSlhCSjB4dFd5MzNkM3UKL1RFK0lIc1N1RGZmdUwrYjBGU3pFOVp4K2FkcVVMV0JKTm94WUpXRGI3aVY2ejlzdG55VkhZd1pSaWU0ckVjVQpnalRPOWd1d1Z1YWc0R0h4MWl1aFFYTlpDTGRZM2tQWlQxZU4zTm90YkxvQlZxUGNxcWRUMTZDL0pDaEdpMGlICnlUSTNGcU83QWdNQkFBRUNnZ0VCQUtxUW9YM0tHWVNHMEFPVXlFWjAwdmQ1Vyt6aVhqYWtETW1CS0dUNGMyaVYKNzR5U2xUcW1ZR1FOcVhMTWtOTkVqM2t5T2RhL0QyRmsxTExKRHcxM05DUVdNTitFY0p4ZEJjelRlVkFZTEZFLwprTlQ0ZDFiVGM1QkJkLzJLSFlGanNUWHRQRkdKcWowRldmS2VWWEQwbmx0bXprdUVZbFJHejNJT1FsMjZ2VmkxCjhOZytZRTVGRmlKemFkSGpJTGFqSHk4ME1xYUg1UUpiY2RUREowWWl3VDIweGVUMVk5clpMVTlCM0UzYVhaVGcKYTRoZitvZVNMcjNlMVk4bHVkRkw2SVlNSTFMc0pPOXhlZkU0MGE2cmZMRitELzEwK0RrcnFwdzVLNGs5MzRiVApadklaZmgvT1dMWHdHR1JoanNYVWlaREJXZDRLY2FBZzgzTUtSRWVkL2lFQ2dZRUE5ZDFlQzJXTHFwWk8rbmVmCjh3OWVBR0pyem1GOVpCZEd1T1VPSkJoeDFTSUREODB3dWFFUEQ5TEZwVnF6bStwREZXWDZ0a3hVTUc4cGEwRDcKWFUvU3Jta0lWNTM1aTVGbmV2SXdKdHJGUnkyS1pQdmRtOFdrdVA2QW9Cd252S2hBcHU2akJZS0lXVFh6NUt3QwowM3FSSGMzOHY2aWNUMEdkT1ZKc2N0WHhkZ01DZ1lFQTZJTS9OSW9HUGhrMjR1MmlKR2YwckFoSGJGL0IrNkJGCllVZ044S3o2MXp6T0RjbVdnekR0bU5PVi9EdStaRjI2UTVhci9mTmRPZWZ0ZHFJU01aY29qTEVnaFVJT0cwZnYKYjcvK1dtR1FiME8vcnRoY2hIVEZ4djk3alJ5U04vdkpxQTdFRnV0RE1mUHdiZUl0NXhLRXBqb0UxNVEyWDBLMwpFenBuL2VUNWFla0NnWUVBMndQVEhydmFhcDl5dzFPRXZIUlgvR2V3T1N4SEVyNVpWYU5Wc1huRkRXTTY3dnlPCkJ3NGQ2SzZOT2Z0T08vbTJ3SDBUbFFqeGhpTy85YmJ4TS9KRGJ2SkphQ05saEpxR1g5TVdhY1pBTG1PM0FMWU0KZHhMYzVaNXczaSsyaGl3clM0a1ExM3VzRWl3cGt1NVhwaU5zMmV3QTFvcFFrNW1UZ05pWG1zUlNVQmtDZ1lFQQp3K3lVZmZrd2VYTlZ4TExwUmpRekFDT1p5OENFL1R0NE94azBaZkhkRFRHM2ovYW1WYllOQUJLSytCaC92cU1jCktZZjFOZkMwUmU4aE1pNHZsb29CS1V5NFVwSHV3UjVFckszajd0VDNtRXBHWTFiOTNyOW5TT0JQaEFEblZUVmUKSDdjUmxSNTVhTVpkZXJwMFk1by9ITE1YRVhGYXY3cS8rZnZsVlJSMDlzRUNnWUVBODBMZWZPeHRSRG8yd2pUZApHK0dyTENqQ2pLUFhtVDJxNEo1RzB1WE5TT0Irc1pYaDQyRVBsOUYvaVRVQWU4bnhEbG0rOUNuR2ZoKzAyRWR3ClJGNzZUWm1wVXdka3lDMEtvQ3dXY28zd0dCL1U5Wlk4QW1wMGVoV2RTTkx5eThjanVIT1VsVTA0ZWdVYUlOb2sKdHRhMEtRaWJnbGRRNjRsbU5FOWN0LzVpZ3FnPQotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg== + diff --git a/testdata/deploy/console/secret.yaml b/testdata/deploy/console/secret.yaml new file mode 100644 index 00000000..82487e2a --- /dev/null +++ b/testdata/deploy/console/secret.yaml @@ -0,0 +1,25 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: v1 +kind: Secret +metadata: + name: secret-ibp-console +type: Opaque +data: + secret.json: "{
    "msp": {
      "component": {
        "keystore": ["LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JR0hBZ0VBTUJNR0J5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEJHMHdhd0lCQVFRZ0h4bDZLTGJEa1NGVXRjZkEKcnRqU3NYNi8zZG9JUTFFQitYSjR5clV4NVhTaFJBTkNBQVFZd3JNNjFxYjliaGhxWVk4ZzVYYVVsakpZTUp2ZgowbUMyZU82WE1zRUFUVnUwS2dCU0ZPcEhvd1pieko4d2pzUnNnS1U0VTdHMDZzMXI1ZW5vbFYxbAotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg=="],
        "signcerts": ["LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM3akNDQXBTZ0F3SUJBZ0lVR3lDRnl1NjcrSWdiaDZWVzQwMUxNOHd5aXhJd0NnWUlLb1pJemowRUF3SXcKYURFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJrd0Z3WURWUVFERXhCbVlXSnlhV010ClkyRXRjMlZ5ZG1WeU1CNFhEVEU0TVRFeE1qQTFNREl3TUZvWERURTVNVEV4TWpBMU1EY3dNRm93ZkRFTE1Ba0cKQTFVRUJoTUNWVk14RnpBVkJnTlZCQWdURGs1dmNuUm9JRU5oY205c2FXNWhNUlF3RWdZRFZRUUtFd3RJZVhCbApjbXhsWkdkbGNqRXVNQXNHQTFVRUN4TUVjR1ZsY2pBTEJnTlZCQXNUQkc5eVp6RXdFZ1lEVlFRTEV3dGtaWEJoCmNuUnRaVzUwTVRFT01Bd0dBMVVFQXhNRmIzSmtaWEl3V1RBVEJnY3Foa2pPUFFJQkJnZ3Foa2pPUFFNQkJ3TkMKQUFRWXdyTTYxcWI5YmhocVlZOGc1WGFVbGpKWU1KdmYwbUMyZU82WE1zRUFUVnUwS2dCU0ZPcEhvd1pieko4dwpqc1JzZ0tVNFU3RzA2czFyNWVub2xWMWxvNElCQmpDQ0FRSXdEZ1lEVlIwUEFRSC9CQVFEQWdlQU1Bd0dBMVVkCkV3RUIvd1FDTUFBd0hRWURWUjBPQkJZRUZCelBpTE9kSW1pN1diRXFMOVFJcUJobGlBLzNNQjhHQTFVZEl3UVkKTUJhQUZPUklReHNOcHNrMHVXYUpUaXJEd29oQUhGRExNRG9HQTFVZEVRUXpNREdDTDJ4cFkyaDFibXd0WTJFdApabUZpY21sakxXTmhMV1JsY0d4dmVXMWxiblF0WWpjNVlqVm1PVGcyTFhOa05IWTJNR1lHQ0NvREJBVUdCd2dCCkJGcDdJbUYwZEhKeklqcDdJbWhtTGtGbVptbHNhV0YwYVc5dUlqb2liM0puTVM1a1pYQmhjblJ0Wlc1ME1TSXMKSW1obUxrVnVjbTlzYkcxbGJuUkpSQ0k2SW05eVpHVnlJaXdpYUdZdVZIbHdaU0k2SW5CbFpYSWlmWDB3Q2dZSQpLb1pJemowRUF3SURTQUF3UlFJaEFJaUd3V2d2VHFwSjVCWlF1c21TMnR1bWN6cHlvTlRDZVZEaGE0ck0xcElVCkFpQksxY0d4eTZ1ZVprNkRYOHFDL1M5dXpHZW5STzZvVitlUVd4ekxiWGhiZGc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg=="],
        "cacerts": ["LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNGakNDQWIyZ0F3SUJBZ0lVUXg1NSt1OFI0QVZGMVBtdmFUaXgvRURVYnl3d0NnWUlLb1pJemowRUF3SXcKYURFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJrd0Z3WURWUVFERXhCbVlXSnlhV010ClkyRXRjMlZ5ZG1WeU1CNFhEVEU0TVRFeE1qQXhOVEl3TUZvWERUTXpNVEV3T0RBeE5USXdNRm93YURFTE1Ba0cKQTFVRUJoTUNWVk14RnpBVkJnTlZCQWdURGs1dmNuUm9JRU5oY205c2FXNWhNUlF3RWdZRFZRUUtFd3RJZVhCbApjbXhsWkdkbGNqRVBNQTBHQTFVRUN4TUdSbUZpY21sak1Sa3dGd1lEVlFRREV4Qm1ZV0p5YVdNdFkyRXRjMlZ5CmRtVnlNRmt3RXdZSEtvWkl6ajBDQVFZSUtvWkl6ajBEQVFjRFFnQUVzOTRSOXgyaHR4MzVRNERYaVc0UXlQaUEKaGRDUjVEb01ueU5iamt0Sjl1T3pZR28rT3ZRdzhpdXFyeTRoNlhBTG9kNnUwQ1pmQWdJNjRUaVEzZkNaeEtORgpNRU13RGdZRFZSMFBBUUgvQkFRREFnRUdNQklHQTFVZEV3RUIvd1FJTUFZQkFmOENBUUV3SFFZRFZSME9CQllFCkZPUklReHNOcHNrMHVXYUpUaXJEd29oQUhGRExNQW9HQ0NxR1NNNDlCQU1DQTBjQU1FUUNJQ3M3NmJDRm1LSm8KSXVhWkF4aUFKMjlvVURRV1I0cmNYUlVjSUh3TTd4NWRBaUFJUDNua0paUFdKMmR6NDZWaEtNdHNsNXE0ZDhyWgpsY0NuOUcrcW1Hc0JoZz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K"],
        "admincerts": ["LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNaVENDQWd1Z0F3SUJBZ0lVVWttbmo3NktVOElaMHVQL3FmYnhPYWtmYjFBd0NnWUlLb1pJemowRUF3SXcKYURFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJrd0Z3WURWUVFERXhCbVlXSnlhV010ClkyRXRjMlZ5ZG1WeU1CNFhEVEU0TVRFeE1qQXlNVEl3TUZvWERURTVNVEV4TWpBeU1UY3dNRm93WFRFTE1Ba0cKQTFVRUJoTUNWVk14RnpBVkJnTlZCQWdURGs1dmNuUm9JRU5oY205c2FXNWhNUlF3RWdZRFZRUUtFd3RJZVhCbApjbXhsWkdkbGNqRVBNQTBHQTFVRUN4TUdZMnhwWlc1ME1RNHdEQVlEVlFRREV3VmhaRzFwYmpCWk1CTUdCeXFHClNNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJBUmFUa05yQ1FoK25lN2YxYWVFdEljRzZ4aFRKbXZORFJiVnVtdSsKUSt4WTJqenNjcHFJSUxLdEdBU3NlV2J2TzltV09WcmRlT1ZLck5kYmdPUWhRZVNqZ1owd2dab3dEZ1lEVlIwUApBUUgvQkFRREFnZUFNQXdHQTFVZEV3RUIvd1FDTUFBd0hRWURWUjBPQkJZRUZPOHg1MUx0a2J2dk1hWmJXZTFmCmwrbmlweHdlTUI4R0ExVWRJd1FZTUJhQUZPUklReHNOcHNrMHVXYUpUaXJEd29oQUhGRExNRG9HQTFVZEVRUXoKTURHQ0wyeHBZMmgxYm13dFkyRXRabUZpY21sakxXTmhMV1JsY0d4dmVXMWxiblF0WWpjNVlqVm1PVGcyTFhOawpOSFkyTUFvR0NDcUdTTTQ5QkFNQ0EwZ0FNRVVDSVFEWDN2dHJvaVNydXZFS09aRzNlT0pWeWQ2N3BMMUI4VUpqCmI4cjlka3VLa3dJZ0tnZlp2clB3WCtpdjI0Uk5vK3JMVlNJUEdUTnE5VVVIK2RkbzEveThHN2M9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K"],
        "intermediatecerts": [""]
      },
      "tls": {
        "keystore": ["LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JR0hBZ0VBTUJNR0J5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEJHMHdhd0lCQVFRZzZXWGJKa2NkR1hjOGVTYlYKQTEzMzdRWUczSXJEaVZTQURwNDBuVW1RSmIyaFJBTkNBQVRabzFtMWt0YTVXZmYyWDZMNG1RN09aVE56NlhLMwpuS1BTYTVucjZuUC9hUjJ1bVM4ZUtqMitBVzYwRXd4VklreGxEVXhsRFpqZVhWZnZMdDRTYWloeAotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg=="],
        "signcerts": ["LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURDRENDQXErZ0F3SUJBZ0lVZExQY3pjd0FaWXdrdTZ1dVplYWNiV2sxYmwwd0NnWUlLb1pJemowRUF3SXcKWkRFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJVd0V3WURWUVFERXd4MGJITmpZUzFqCmIyMXRiMjR3SGhjTk1UZ3hNVEV5TURVd05EQXdXaGNOTVRreE1URXlNRFV3T1RBd1dqQjhNUXN3Q1FZRFZRUUcKRXdKVlV6RVhNQlVHQTFVRUNCTU9UbTl5ZEdnZ1EyRnliMnhwYm1FeEZEQVNCZ05WQkFvVEMwaDVjR1Z5YkdWawpaMlZ5TVM0d0N3WURWUVFMRXdSd1pXVnlNQXNHQTFVRUN4TUViM0puTVRBU0JnTlZCQXNUQzJSbGNHRnlkRzFsCmJuUXhNUTR3REFZRFZRUURFd1Z2Y21SbGNqQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJObWoKV2JXUzFybFo5L1pmb3ZpWkRzNWxNM1BwY3JlY285SnJtZXZxYy85cEhhNlpMeDRxUGI0QmJyUVRERlVpVEdVTgpUR1VObU41ZFYrOHUzaEpxS0hHamdnRWxNSUlCSVRBT0JnTlZIUThCQWY4RUJBTUNBNmd3SFFZRFZSMGxCQll3CkZBWUlLd1lCQlFVSEF3RUdDQ3NHQVFVRkJ3TUNNQXdHQTFVZEV3RUIvd1FDTUFBd0hRWURWUjBPQkJZRUZDdUIKT2pmQ29qMjZXbDZraWtGc2RzbktOalQ0TUI4R0ExVWRJd1FZTUJhQUZPdGM0ajY3SFJkd1E2NEVvZXk3VTZZSgo5VnNyTURvR0ExVWRFUVF6TURHQ0wyeHBZMmgxYm13dFkyRXRabUZpY21sakxXTmhMV1JsY0d4dmVXMWxiblF0CllqYzVZalZtT1RnMkxYTmtOSFkyTUdZR0NDb0RCQVVHQndnQkJGcDdJbUYwZEhKeklqcDdJbWhtTGtGbVptbHMKYVdGMGFXOXVJam9pYjNKbk1TNWtaWEJoY25SdFpXNTBNU0lzSW1obUxrVnVjbTlzYkcxbGJuUkpSQ0k2SW05eQpaR1Z5SWl3aWFHWXVWSGx3WlNJNkluQmxaWElpZlgwd0NnWUlLb1pJemowRUF3SURSd0F3UkFJZ1NoS3VDQ3FoCjlkbVBFUXA5eE55cHA1TDEyOHhBQXIvb2xxeDdtMjcyREgwQ0lIYzFzeFZqeG5TNE1rZ044VnZNZlFvR2lZeUQKK0M1T1ZhdndCeHFhYW5OZgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg=="],
        "cacerts": ["LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNKVENDQWN5Z0F3SUJBZ0lVSlN2WjVvSHZEMmxDdW5qQnphZW02QXpyR1pRd0NnWUlLb1pJemowRUF3SXcKWkRFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJVd0V3WURWUVFERXd4MGJITmpZUzFqCmIyMXRiMjR3SGhjTk1UZ3hNVEV5TURFMU1qQXdXaGNOTXpNeE1UQTRNREUxTWpBd1dqQmtNUXN3Q1FZRFZRUUcKRXdKVlV6RVhNQlVHQTFVRUNCTU9UbTl5ZEdnZ1EyRnliMnhwYm1FeEZEQVNCZ05WQkFvVEMwaDVjR1Z5YkdWawpaMlZ5TVE4d0RRWURWUVFMRXdaR1lXSnlhV014RlRBVEJnTlZCQU1UREhSc2MyTmhMV052YlcxdmJqQlpNQk1HCkJ5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEEwSUFCR0dxVDB4SU5mTXVQRTVqdVFCZ0dzK2tiRzY5WDdMMlBhSm4KQllrRGVGYzdGNGdybFovc0hqcTU4K1FCRStyazFzd3VqQjhrbW9ranhjOWsxN1hPRDllalhEQmFNQTRHQTFVZApEd0VCL3dRRUF3SUJCakFTQmdOVkhSTUJBZjhFQ0RBR0FRSC9BZ0VCTUIwR0ExVWREZ1FXQkJUclhPSSt1eDBYCmNFT3VCS0hzdTFPbUNmVmJLekFWQmdOVkhSRUVEakFNaHdRSkRCTXNod1FLRkZGYU1Bb0dDQ3FHU000OUJBTUMKQTBjQU1FUUNJRmRocFV1UHpWOVBDQmxmZWhkcnBFdHNrUjFaYWRySDM5VU96OUoySnV5NUFpQjU0QnZLdmtaTwpNdVU2bEZMNVFwTEN3TmRCZUlaclNydzhzVlNmNTZEcWVRPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo="],
        "admincerts": ["LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNnRENDQWlhZ0F3SUJBZ0lVTmxySmFWZmtPeWhBVVZvSGNvYVErRGdUWkxJd0NnWUlLb1pJemowRUF3SXcKWkRFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJVd0V3WURWUVFERXd4MGJITmpZUzFqCmIyMXRiMjR3SGhjTk1UZ3hNVEV5TURJeE5qQXdXaGNOTVRreE1URXlNREl5TVRBd1dqQmRNUXN3Q1FZRFZRUUcKRXdKVlV6RVhNQlVHQTFVRUNCTU9UbTl5ZEdnZ1EyRnliMnhwYm1FeEZEQVNCZ05WQkFvVEMwaDVjR1Z5YkdWawpaMlZ5TVE4d0RRWURWUVFMRXdaamJHbGxiblF4RGpBTUJnTlZCQU1UQldGa2JXbHVNRmt3RXdZSEtvWkl6ajBDCkFRWUlLb1pJemowREFRY0RRZ0FFWHBjUkM2cDd5bGwxUzZ3SmVldGVZQnRUR2tzTVorMk9RTGd1UVcrRWlNZ00KajdESXl3aWk0OUtzbmhtTFJLLzdqMndJMmJ4c0xFcDc3cGExNURaR0JxT0J2RENCdVRBT0JnTlZIUThCQWY4RQpCQU1DQTZnd0hRWURWUjBsQkJZd0ZBWUlLd1lCQlFVSEF3RUdDQ3NHQVFVRkJ3TUNNQXdHQTFVZEV3RUIvd1FDCk1BQXdIUVlEVlIwT0JCWUVGRDRIOGpieUhlQTlhY3dDcmU0SVgxamhnQmJJTUI4R0ExVWRJd1FZTUJhQUZPdGMKNGo2N0hSZHdRNjRFb2V5N1U2WUo5VnNyTURvR0ExVWRFUVF6TURHQ0wyeHBZMmgxYm13dFkyRXRabUZpY21sagpMV05oTFdSbGNHeHZlVzFsYm5RdFlqYzVZalZtT1RnMkxYTmtOSFkyTUFvR0NDcUdTTTQ5QkFNQ0EwZ0FNRVVDCklRQ2txc3l2RG52bkJUSEgycDBSSDlwQjlJRFFqd083d0UxODZRWnBRWi9kdVFJZ1c2dW05NXBBSEFjcCs2NlkKOW4xcTVVSzFMQWJRZ0wwWm94OTVNazhZZzU4PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg=="],
        "intermediatecerts": [""]
      }
    }
}" \ No newline at end of file diff --git a/testdata/deploy/console/tlssecret.yaml b/testdata/deploy/console/tlssecret.yaml new file mode 100644 index 00000000..0cda6a6d --- /dev/null +++ b/testdata/deploy/console/tlssecret.yaml @@ -0,0 +1,26 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: v1 +data: + tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURBVENDQWVtZ0F3SUJBZ0lKQUlHcXBwVFZlVm1DTUEwR0NTcUdTSWIzRFFFQkJRVUFNQmN4RlRBVEJnTlYKQkFNTUREa3VNekF1TWpJekxqRXlNVEFlRncweE9UQTFNRE14T1RJMk5EaGFGdzB5T1RBME16QXhPVEkyTkRoYQpNQmN4RlRBVEJnTlZCQU1NRERrdU16QXVNakl6TGpFeU1UQ0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQCkFEQ0NBUW9DZ2dFQkFNUm1qei8xelpwcEo3WHYrRVhEL3pla2I0VFlTb1p5N1JsZXlRSG5FTmdLK3BuRnVFcVUKSUlkdkJCbXhTcjVEWHFtQ21RL04zOXZ1NVowR1UzYXUzRGQ1T3ZwbVJjRHdpYjhFY1RtdXYvN0ZyVUZDeDBTQQp5THI4RHRXZENkK3ZndjlxWmJQRGpvcTk3VHVFbGExYlQxK3B5b2h5ZnJFVmsxWUcrekhHS0dUTU5rRGtUY2I4CmRZeEdPNkltZS9tbVJQM05zSlNOck1vWVN5RURuWkIvUGtFcmxoa1AwZFNUazRtWm5GdW1GTnlXaU8zWmZzd0oKT2R3YlI0WlcydktMODREZ1RYRE9ZYnIyZU5CYXZvNVgxWDR2L01PVW15bUh3RXZyK2kySy9GditxM2FGVFp2OApxZW9xdmR1WkExWTlrcTVPVTRFOU95Smw1bStib3A1aThTa0NBd0VBQWFOUU1FNHdIUVlEVlIwT0JCWUVGRVk2CjI2VzM3ZDNOdXM2N1ZQSHJvVzlFVDZTU01COEdBMVVkSXdRWU1CYUFGRVk2MjZXMzdkM051czY3VlBIcm9XOUUKVDZTU01Bd0dBMVVkRXdRRk1BTUJBZjh3RFFZSktvWklodmNOQVFFRkJRQURnZ0VCQURXRVJ3dDVzVUVBWTViRApkRTBFK3dzYzZnNk1zeXR1S2lPc2dhSktuM0VkeEVIUHphRWoyaEhmVzBQU1p4cmVTTUNVbE1lWVdKVmdkTTFFClVlUDVRbk1HNDRqTGs1aFJOazk0Tm04aTd0bUJaL3lEOXlBeGl0SXRpVjduR2gvaTlTQnRGQVZQeDhVVEYrZHEKbEJhOTZtOEh2aktvcnJ5T3ViOE1YVCtLWU4yQWVZVEp3QlE4WXorQy9lN0RNSk91VXJqem9Ia01hdzRXSUIvcQpRTk5mNjY5SURtT3BhMnZ4dCtTOUJNeDNsL2pkYmxpZXhxV1A2NUc3SzN1eGEzdHI2UWhxNmhjK2VCNElTaEtsCnR4OHhSZVFrRFV4Uk1Vd05iWVZSdU1wTnNmR0FzcmRXeHNyWDRFeVBZUWIrMDh1VkNCUGdtRVlldHdIY2tscXoKZ2FPbGowOD0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ== + tls.key: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBeEdhUFAvWE5tbWtudGUvNFJjUC9ONlJ2aE5oS2huTHRHVjdKQWVjUTJBcjZtY1c0ClNwUWdoMjhFR2JGS3ZrTmVxWUtaRDgzZjIrN2xuUVpUZHE3Y04zazYrbVpGd1BDSnZ3UnhPYTYvL3NXdFFVTEgKUklESXV2d08xWjBKMzYrQy8ycGxzOE9PaXIzdE80U1ZyVnRQWDZuS2lISitzUldUVmdiN01jWW9aTXcyUU9STgp4dngxakVZN29pWjcrYVpFL2Myd2xJMnN5aGhMSVFPZGtIOCtRU3VXR1EvUjFKT1RpWm1jVzZZVTNKYUk3ZGwrCnpBazUzQnRIaGxiYThvdnpnT0JOY001aHV2WjQwRnEramxmVmZpLzh3NVNiS1lmQVMrdjZMWXI4Vy82cmRvVk4KbS95cDZpcTkyNWtEVmoyU3JrNVRnVDA3SW1YbWI1dWlubUx4S1FJREFRQUJBb0lCQUN2T25JRXR1SkNLaC9zQwo3YVBTRmVrNVdrNk5XQUlwUEUxK1lPTjF3TUlQb05vUHlnMklnTUQwdUR0cTJqc2FGOUZEZWhTV1hTcFhYUXZyClQwNlhyak1KYldoUXk1by9qTm40aWJET0U3RW1Nb0R0L2poL2FVYWIxa3M3WGVwdzRZMUxGQ2hHcEZLNkRtSksKTG9DZWIxUHRNV25COEJSYnRhTm9wenJ3OGkyWkl6RWpocjd1WUpRR2tGWll1SUhrZ3piVW1rZ2tPRk5MUG5CWQpCSkxIT1RaSXlaMS81cUE3VDB2TC8rTXl6cE5pNCs1T1lhaXp0aGFmaTJHNkhYNGhzL2VvS1BzMHpLbEFZRFQ5CkJ3KzdBd0NaQ1V1K1RreUZPWEt0djh3L1BybnFxb0J2cnRqVlRHVUxUSWZOb05SL0NWdHYzWjI1cFE0bkt4NmwKc2FCaFJpRUNnWUVBNEszY1grZ0tiU0hUczlCUnJPMm5CWFhRRnBpNjZjQjNsNkc3WStwbjJkWnAyQk55NUVJLwpjYjJPZkM4NEV5c2NaeEFYeDRXT3VRWUhwMWwyeTZ6NC9UbzJPdDlPa1pVSG5Jd2hCYURGQTJkSGJtd3BWZEtkCmZvZkRhQUVRemp6eVc5bnpWSE5zRzd5bWJQTUlRcjdNOEdQM3h2cVVEYU94NG0rU0l6Nk1aY1VDZ1lFQTM4ZUYKZGtCZHZvcFYzNXkyNGRnL0wxeHNIelQ1enBISFZyanRBTkxrYmp1VDVWbXVwZ3d1emFXWkZyc1RYWFRKbmQ2eQphUlJ3T2gxQktKT2MvL1lkb0IvK2dlUFFCZnN2R3pmWEJTb3plQXZXK2FVM201ZW1jczJxRmhSQnF0TjVYMXFMClZMMllSa1BNWHQwLzNpYVhQQUlZTDNpUEFBa2VpbE5RSTRsbHVCVUNnWUJ4RkEvTjFTTXFPZFFxczdYbnF0UmMKMHlhZnVNNlp4dHhwM0dSTHJUWUhWUHFBWFlsOWlEb1dMS2tCcDJJNWc0RVAvZkY1NGFZclNQV2hMRTIxeEtDcwpFaGdwR1NxWjZyK2g4b1RNS0ZYL21JTkM5L3oxek1LblkvemM0MnhwNEJLNlY3ODN5YituVmhNTTBtUmQ4MW1CCncwNTVQclExQ1NZR0tORU1HL2JkWVFLQmdHbXY2YkIzcGM2ZnR2N3RITko5TnhvL0pERjQ2NkpMNTBGbUJVa20KVVF4ZXh2TEM4R0Vyejg4QUx0VTFkV2ZtQ0hLdkFzUHVDekxuTGliajBIcHkybnNOb1QzblFoQVJHYVpaTVF3WApha2VBRC8vSHNBT2tFOUNVb1lmYitVZWNxTzRIN1ZZUVZTS2FVcU5iQ3hiNFV1VGNlRit6S0paenVDRE1TRVVBClZ6SWhBb0dCQU1xYmZhYkJJR3dYYnNhSmREMGlQQzkwbEZ0M2RtejBST3pGcnZXUGpOd29BL1RWaDY1djRIWkkKMkNESm9TRk5BS0o4b01NQjVyaU85UU5QVVYzYmpZNm8xendIUjQ4QjhIZCt3em9FeWVMVG05dDdqc0ZmNUlmQQpDWUhrSE5VQlkrZGt5Q0FvZnhrNEhSMm5zcG1CRVNLd0xYVUVjTmxqeDdsbFZadjhlNC9VCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0t +kind: Secret +metadata: + name: tlssecret-ibp-console +type: kubernetes.io/tls \ No newline at end of file diff --git a/testdata/deploy/console/ui-password-secret.yaml b/testdata/deploy/console/ui-password-secret.yaml new file mode 100644 index 00000000..c9323ee4 --- /dev/null +++ b/testdata/deploy/console/ui-password-secret.yaml @@ -0,0 +1,25 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +apiVersion: v1 +data: + password: cGFzc3dvcmQ= +kind: Secret +metadata: + name: ui-password-secret +type: generic \ No newline at end of file diff --git a/testdata/deploy/operator.yaml b/testdata/deploy/operator.yaml new file mode 100644 index 00000000..ec3118d1 --- /dev/null +++ b/testdata/deploy/operator.yaml @@ -0,0 +1,145 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: operator + labels: + release: "operator" +spec: + replicas: 1 + strategy: + type: "Recreate" + selector: + matchLabels: + name: operator + template: + metadata: + labels: + release: "operator" + spec: + hostIPC: false + hostNetwork: false + hostPID: false + serviceAccountName: operator + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + securityContext: + runAsNonRoot: true + runAsUser: 1001 + fsGroup: 2000 + imagePullSecrets: + - name: regcred + # TODO:OSS remove initcontainers + initContainers: + - name: "hsm-client" + image: "ghcr.io/ibm-blockchain/ibp-pkcs11-proxy/gemalto-client:1.0.3-amd64" + imagePullPolicy: Always + command: + - "sh" + - "-c" + - 'cp $ENV_FILE /hsm/.env && source /hsm/.env && src=($SOURCE) && trgt=($TARGET) && for i in ${!src[@]}; do filename=$(basename -- ${src[i]}) dst="/hsm/$filename"; echo "Copying ${src[i]} to ${dst}"; mkdir -p $(dirname $dst); cp -r ${src[i]} $dst; done' + securityContext: + privileged: true + allowPrivilegeEscalation: true + readOnlyRootFilesystem: false + runAsNonRoot: false + runAsUser: 0 + volumeMounts: + - name: "hsm-volume" + mountPath: "/hsm" + resources: + requests: + cpu: 0.1 + memory: "100Mi" + ephemeral-storage: "100Mi" + limits: + cpu: 2 + memory: "4Gi" + ephemeral-storage: "1Gi" + containers: + - name: operator + command: + - "sh" + - "-c" + - "source /hsm/.env && operator" + imagePullPolicy: Always + securityContext: + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: false + runAsNonRoot: false + runAsUser: 1001 + capabilities: + drop: + - ALL + add: + - CHOWN + - FOWNER + livenessProbe: + tcpSocket: + port: 8383 + initialDelaySeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + readinessProbe: + tcpSocket: + port: 8383 + initialDelaySeconds: 10 + timeoutSeconds: 5 + periodSeconds: 5 + volumeMounts: + - name: "hsm-volume" + mountPath: "/hsm" + - name: "hsm-volume" + mountPath: "/etc/Chrystoki.conf" + subPath: "Chrystoki.conf" + env: + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: OPERATOR_NAME + value: "operator" + - name: CLUSTERTYPE + value: K8S + resources: + requests: + cpu: 100m + memory: 200Mi + ephemeral-storage: 100Mi + limits: + cpu: 100m + memory: 200Mi + ephemeral-storage: 100Mi + volumes: + - 
name: hsm-volume + emptyDir: + medium: Memory diff --git a/testdata/deploy/operatorhsm.yaml b/testdata/deploy/operatorhsm.yaml new file mode 100644 index 00000000..9a1cbd79 --- /dev/null +++ b/testdata/deploy/operatorhsm.yaml @@ -0,0 +1,150 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: operator + labels: + release: "operator" + app.kubernetes.io/managed-by: "operator" +spec: + replicas: 1 + strategy: + type: "Recreate" + selector: + matchLabels: + name: operator + template: + metadata: + labels: + name: operator + release: "operator" + app.kubernetes.io/managed-by: "operator" + spec: + hostIPC: false + hostNetwork: false + hostPID: false + serviceAccountName: operator + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + securityContext: + runAsNonRoot: true + runAsUser: 1001 + fsGroup: 2000 + imagePullSecrets: + - name: regcred + # TODO:OSS remove initcontainers + initContainers: + - name: "hsm-client" + image: "ghcr.io/ibm-blockchain/ibp-pkcs11-proxy/gemalto-client:1.0.3-amd64" + imagePullPolicy: Always + command: + - "sh" + - "-c" + - 'cp $ENV_FILE /hsm/.env && source /hsm/.env && src=($SOURCE) && trgt=($TARGET) && for i in ${!src[@]}; do filename=$(basename -- ${src[i]}) dst="/hsm/$filename"; echo "Copying ${src[i]} to ${dst}"; mkdir -p $(dirname $dst); cp -r ${src[i]} $dst; done' + securityContext: + privileged: true + allowPrivilegeEscalation: true + readOnlyRootFilesystem: false + runAsNonRoot: false + runAsUser: 0 + volumeMounts: + - name: "hsm-volume" + mountPath: "/hsm" + resources: + requests: + cpu: 0.1 + memory: "100Mi" + ephemeral-storage: "100Mi" + limits: + cpu: 2 + memory: "4Gi" + ephemeral-storage: "1Gi" + containers: + - name: operator + command: + - "sh" + - "-c" + - "source /hsm/.env && operator" + imagePullPolicy: Always + securityContext: + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: false + runAsNonRoot: false + runAsUser: 1001 + capabilities: + drop: + - ALL + add: + - CHOWN + - FOWNER + livenessProbe: + tcpSocket: + port: 8383 + initialDelaySeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + readinessProbe: + tcpSocket: + port: 8383 + initialDelaySeconds: 10 + timeoutSeconds: 5 + periodSeconds: 5 + volumeMounts: + - name: "hsm-volume" + mountPath: "/hsm" + - name: "hsm-volume" + mountPath: "/etc/Chrystoki.conf" + subPath: "Chrystoki.conf" + env: + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: OPERATOR_NAME + value: "operator" + - name: CLUSTERTYPE + value: K8S + - name: HSM_CLIENT_IMAGE + value: ghcr.io/ibm-blockchain/ibp-pkcs11-proxy/gemalto-client:1.0.3-amd64 + resources: + requests: + cpu: 100m + memory: 
200Mi + ephemeral-storage: 100Mi + limits: + cpu: 100m + memory: 200Mi + ephemeral-storage: 1Gi + volumes: + - name: hsm-volume + emptyDir: + medium: Memory \ No newline at end of file diff --git a/testdata/deploy/orderer/secret.yaml b/testdata/deploy/orderer/secret.yaml new file mode 100644 index 00000000..8879061b --- /dev/null +++ b/testdata/deploy/orderer/secret.yaml @@ -0,0 +1,24 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +apiVersion: v1 +kind: Secret +metadata: + name: secret-ibp-orderer +type: Opaque +data: + secret.json: "{
    "msp": {
      "component": {
        "keystore": ["LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JR0hBZ0VBTUJNR0J5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEJHMHdhd0lCQVFRZ0h4bDZLTGJEa1NGVXRjZkEKcnRqU3NYNi8zZG9JUTFFQitYSjR5clV4NVhTaFJBTkNBQVFZd3JNNjFxYjliaGhxWVk4ZzVYYVVsakpZTUp2ZgowbUMyZU82WE1zRUFUVnUwS2dCU0ZPcEhvd1pieko4d2pzUnNnS1U0VTdHMDZzMXI1ZW5vbFYxbAotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg=="],
        "signcerts": ["LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM3akNDQXBTZ0F3SUJBZ0lVR3lDRnl1NjcrSWdiaDZWVzQwMUxNOHd5aXhJd0NnWUlLb1pJemowRUF3SXcKYURFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJrd0Z3WURWUVFERXhCbVlXSnlhV010ClkyRXRjMlZ5ZG1WeU1CNFhEVEU0TVRFeE1qQTFNREl3TUZvWERURTVNVEV4TWpBMU1EY3dNRm93ZkRFTE1Ba0cKQTFVRUJoTUNWVk14RnpBVkJnTlZCQWdURGs1dmNuUm9JRU5oY205c2FXNWhNUlF3RWdZRFZRUUtFd3RJZVhCbApjbXhsWkdkbGNqRXVNQXNHQTFVRUN4TUVjR1ZsY2pBTEJnTlZCQXNUQkc5eVp6RXdFZ1lEVlFRTEV3dGtaWEJoCmNuUnRaVzUwTVRFT01Bd0dBMVVFQXhNRmIzSmtaWEl3V1RBVEJnY3Foa2pPUFFJQkJnZ3Foa2pPUFFNQkJ3TkMKQUFRWXdyTTYxcWI5YmhocVlZOGc1WGFVbGpKWU1KdmYwbUMyZU82WE1zRUFUVnUwS2dCU0ZPcEhvd1pieko4dwpqc1JzZ0tVNFU3RzA2czFyNWVub2xWMWxvNElCQmpDQ0FRSXdEZ1lEVlIwUEFRSC9CQVFEQWdlQU1Bd0dBMVVkCkV3RUIvd1FDTUFBd0hRWURWUjBPQkJZRUZCelBpTE9kSW1pN1diRXFMOVFJcUJobGlBLzNNQjhHQTFVZEl3UVkKTUJhQUZPUklReHNOcHNrMHVXYUpUaXJEd29oQUhGRExNRG9HQTFVZEVRUXpNREdDTDJ4cFkyaDFibXd0WTJFdApabUZpY21sakxXTmhMV1JsY0d4dmVXMWxiblF0WWpjNVlqVm1PVGcyTFhOa05IWTJNR1lHQ0NvREJBVUdCd2dCCkJGcDdJbUYwZEhKeklqcDdJbWhtTGtGbVptbHNhV0YwYVc5dUlqb2liM0puTVM1a1pYQmhjblJ0Wlc1ME1TSXMKSW1obUxrVnVjbTlzYkcxbGJuUkpSQ0k2SW05eVpHVnlJaXdpYUdZdVZIbHdaU0k2SW5CbFpYSWlmWDB3Q2dZSQpLb1pJemowRUF3SURTQUF3UlFJaEFJaUd3V2d2VHFwSjVCWlF1c21TMnR1bWN6cHlvTlRDZVZEaGE0ck0xcElVCkFpQksxY0d4eTZ1ZVprNkRYOHFDL1M5dXpHZW5STzZvVitlUVd4ekxiWGhiZGc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg=="],
        "cacerts": ["LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNGakNDQWIyZ0F3SUJBZ0lVUXg1NSt1OFI0QVZGMVBtdmFUaXgvRURVYnl3d0NnWUlLb1pJemowRUF3SXcKYURFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJrd0Z3WURWUVFERXhCbVlXSnlhV010ClkyRXRjMlZ5ZG1WeU1CNFhEVEU0TVRFeE1qQXhOVEl3TUZvWERUTXpNVEV3T0RBeE5USXdNRm93YURFTE1Ba0cKQTFVRUJoTUNWVk14RnpBVkJnTlZCQWdURGs1dmNuUm9JRU5oY205c2FXNWhNUlF3RWdZRFZRUUtFd3RJZVhCbApjbXhsWkdkbGNqRVBNQTBHQTFVRUN4TUdSbUZpY21sak1Sa3dGd1lEVlFRREV4Qm1ZV0p5YVdNdFkyRXRjMlZ5CmRtVnlNRmt3RXdZSEtvWkl6ajBDQVFZSUtvWkl6ajBEQVFjRFFnQUVzOTRSOXgyaHR4MzVRNERYaVc0UXlQaUEKaGRDUjVEb01ueU5iamt0Sjl1T3pZR28rT3ZRdzhpdXFyeTRoNlhBTG9kNnUwQ1pmQWdJNjRUaVEzZkNaeEtORgpNRU13RGdZRFZSMFBBUUgvQkFRREFnRUdNQklHQTFVZEV3RUIvd1FJTUFZQkFmOENBUUV3SFFZRFZSME9CQllFCkZPUklReHNOcHNrMHVXYUpUaXJEd29oQUhGRExNQW9HQ0NxR1NNNDlCQU1DQTBjQU1FUUNJQ3M3NmJDRm1LSm8KSXVhWkF4aUFKMjlvVURRV1I0cmNYUlVjSUh3TTd4NWRBaUFJUDNua0paUFdKMmR6NDZWaEtNdHNsNXE0ZDhyWgpsY0NuOUcrcW1Hc0JoZz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K"],
        "admincerts": ["LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNaVENDQWd1Z0F3SUJBZ0lVVWttbmo3NktVOElaMHVQL3FmYnhPYWtmYjFBd0NnWUlLb1pJemowRUF3SXcKYURFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJrd0Z3WURWUVFERXhCbVlXSnlhV010ClkyRXRjMlZ5ZG1WeU1CNFhEVEU0TVRFeE1qQXlNVEl3TUZvWERURTVNVEV4TWpBeU1UY3dNRm93WFRFTE1Ba0cKQTFVRUJoTUNWVk14RnpBVkJnTlZCQWdURGs1dmNuUm9JRU5oY205c2FXNWhNUlF3RWdZRFZRUUtFd3RJZVhCbApjbXhsWkdkbGNqRVBNQTBHQTFVRUN4TUdZMnhwWlc1ME1RNHdEQVlEVlFRREV3VmhaRzFwYmpCWk1CTUdCeXFHClNNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJBUmFUa05yQ1FoK25lN2YxYWVFdEljRzZ4aFRKbXZORFJiVnVtdSsKUSt4WTJqenNjcHFJSUxLdEdBU3NlV2J2TzltV09WcmRlT1ZLck5kYmdPUWhRZVNqZ1owd2dab3dEZ1lEVlIwUApBUUgvQkFRREFnZUFNQXdHQTFVZEV3RUIvd1FDTUFBd0hRWURWUjBPQkJZRUZPOHg1MUx0a2J2dk1hWmJXZTFmCmwrbmlweHdlTUI4R0ExVWRJd1FZTUJhQUZPUklReHNOcHNrMHVXYUpUaXJEd29oQUhGRExNRG9HQTFVZEVRUXoKTURHQ0wyeHBZMmgxYm13dFkyRXRabUZpY21sakxXTmhMV1JsY0d4dmVXMWxiblF0WWpjNVlqVm1PVGcyTFhOawpOSFkyTUFvR0NDcUdTTTQ5QkFNQ0EwZ0FNRVVDSVFEWDN2dHJvaVNydXZFS09aRzNlT0pWeWQ2N3BMMUI4VUpqCmI4cjlka3VLa3dJZ0tnZlp2clB3WCtpdjI0Uk5vK3JMVlNJUEdUTnE5VVVIK2RkbzEveThHN2M9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K"],
        "intermediatecerts": [""]
      },
      "tls": {
        "keystore": ["LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JR0hBZ0VBTUJNR0J5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEJHMHdhd0lCQVFRZzZXWGJKa2NkR1hjOGVTYlYKQTEzMzdRWUczSXJEaVZTQURwNDBuVW1RSmIyaFJBTkNBQVRabzFtMWt0YTVXZmYyWDZMNG1RN09aVE56NlhLMwpuS1BTYTVucjZuUC9hUjJ1bVM4ZUtqMitBVzYwRXd4VklreGxEVXhsRFpqZVhWZnZMdDRTYWloeAotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg=="],
        "signcerts": ["LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURDRENDQXErZ0F3SUJBZ0lVZExQY3pjd0FaWXdrdTZ1dVplYWNiV2sxYmwwd0NnWUlLb1pJemowRUF3SXcKWkRFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJVd0V3WURWUVFERXd4MGJITmpZUzFqCmIyMXRiMjR3SGhjTk1UZ3hNVEV5TURVd05EQXdXaGNOTVRreE1URXlNRFV3T1RBd1dqQjhNUXN3Q1FZRFZRUUcKRXdKVlV6RVhNQlVHQTFVRUNCTU9UbTl5ZEdnZ1EyRnliMnhwYm1FeEZEQVNCZ05WQkFvVEMwaDVjR1Z5YkdWawpaMlZ5TVM0d0N3WURWUVFMRXdSd1pXVnlNQXNHQTFVRUN4TUViM0puTVRBU0JnTlZCQXNUQzJSbGNHRnlkRzFsCmJuUXhNUTR3REFZRFZRUURFd1Z2Y21SbGNqQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJObWoKV2JXUzFybFo5L1pmb3ZpWkRzNWxNM1BwY3JlY285SnJtZXZxYy85cEhhNlpMeDRxUGI0QmJyUVRERlVpVEdVTgpUR1VObU41ZFYrOHUzaEpxS0hHamdnRWxNSUlCSVRBT0JnTlZIUThCQWY4RUJBTUNBNmd3SFFZRFZSMGxCQll3CkZBWUlLd1lCQlFVSEF3RUdDQ3NHQVFVRkJ3TUNNQXdHQTFVZEV3RUIvd1FDTUFBd0hRWURWUjBPQkJZRUZDdUIKT2pmQ29qMjZXbDZraWtGc2RzbktOalQ0TUI4R0ExVWRJd1FZTUJhQUZPdGM0ajY3SFJkd1E2NEVvZXk3VTZZSgo5VnNyTURvR0ExVWRFUVF6TURHQ0wyeHBZMmgxYm13dFkyRXRabUZpY21sakxXTmhMV1JsY0d4dmVXMWxiblF0CllqYzVZalZtT1RnMkxYTmtOSFkyTUdZR0NDb0RCQVVHQndnQkJGcDdJbUYwZEhKeklqcDdJbWhtTGtGbVptbHMKYVdGMGFXOXVJam9pYjNKbk1TNWtaWEJoY25SdFpXNTBNU0lzSW1obUxrVnVjbTlzYkcxbGJuUkpSQ0k2SW05eQpaR1Z5SWl3aWFHWXVWSGx3WlNJNkluQmxaWElpZlgwd0NnWUlLb1pJemowRUF3SURSd0F3UkFJZ1NoS3VDQ3FoCjlkbVBFUXA5eE55cHA1TDEyOHhBQXIvb2xxeDdtMjcyREgwQ0lIYzFzeFZqeG5TNE1rZ044VnZNZlFvR2lZeUQKK0M1T1ZhdndCeHFhYW5OZgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg=="],
        "cacerts": ["LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNKVENDQWN5Z0F3SUJBZ0lVSlN2WjVvSHZEMmxDdW5qQnphZW02QXpyR1pRd0NnWUlLb1pJemowRUF3SXcKWkRFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJVd0V3WURWUVFERXd4MGJITmpZUzFqCmIyMXRiMjR3SGhjTk1UZ3hNVEV5TURFMU1qQXdXaGNOTXpNeE1UQTRNREUxTWpBd1dqQmtNUXN3Q1FZRFZRUUcKRXdKVlV6RVhNQlVHQTFVRUNCTU9UbTl5ZEdnZ1EyRnliMnhwYm1FeEZEQVNCZ05WQkFvVEMwaDVjR1Z5YkdWawpaMlZ5TVE4d0RRWURWUVFMRXdaR1lXSnlhV014RlRBVEJnTlZCQU1UREhSc2MyTmhMV052YlcxdmJqQlpNQk1HCkJ5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEEwSUFCR0dxVDB4SU5mTXVQRTVqdVFCZ0dzK2tiRzY5WDdMMlBhSm4KQllrRGVGYzdGNGdybFovc0hqcTU4K1FCRStyazFzd3VqQjhrbW9ranhjOWsxN1hPRDllalhEQmFNQTRHQTFVZApEd0VCL3dRRUF3SUJCakFTQmdOVkhSTUJBZjhFQ0RBR0FRSC9BZ0VCTUIwR0ExVWREZ1FXQkJUclhPSSt1eDBYCmNFT3VCS0hzdTFPbUNmVmJLekFWQmdOVkhSRUVEakFNaHdRSkRCTXNod1FLRkZGYU1Bb0dDQ3FHU000OUJBTUMKQTBjQU1FUUNJRmRocFV1UHpWOVBDQmxmZWhkcnBFdHNrUjFaYWRySDM5VU96OUoySnV5NUFpQjU0QnZLdmtaTwpNdVU2bEZMNVFwTEN3TmRCZUlaclNydzhzVlNmNTZEcWVRPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo="],
        "admincerts": ["LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNnRENDQWlhZ0F3SUJBZ0lVTmxySmFWZmtPeWhBVVZvSGNvYVErRGdUWkxJd0NnWUlLb1pJemowRUF3SXcKWkRFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJVd0V3WURWUVFERXd4MGJITmpZUzFqCmIyMXRiMjR3SGhjTk1UZ3hNVEV5TURJeE5qQXdXaGNOTVRreE1URXlNREl5TVRBd1dqQmRNUXN3Q1FZRFZRUUcKRXdKVlV6RVhNQlVHQTFVRUNCTU9UbTl5ZEdnZ1EyRnliMnhwYm1FeEZEQVNCZ05WQkFvVEMwaDVjR1Z5YkdWawpaMlZ5TVE4d0RRWURWUVFMRXdaamJHbGxiblF4RGpBTUJnTlZCQU1UQldGa2JXbHVNRmt3RXdZSEtvWkl6ajBDCkFRWUlLb1pJemowREFRY0RRZ0FFWHBjUkM2cDd5bGwxUzZ3SmVldGVZQnRUR2tzTVorMk9RTGd1UVcrRWlNZ00KajdESXl3aWk0OUtzbmhtTFJLLzdqMndJMmJ4c0xFcDc3cGExNURaR0JxT0J2RENCdVRBT0JnTlZIUThCQWY4RQpCQU1DQTZnd0hRWURWUjBsQkJZd0ZBWUlLd1lCQlFVSEF3RUdDQ3NHQVFVRkJ3TUNNQXdHQTFVZEV3RUIvd1FDCk1BQXdIUVlEVlIwT0JCWUVGRDRIOGpieUhlQTlhY3dDcmU0SVgxamhnQmJJTUI4R0ExVWRJd1FZTUJhQUZPdGMKNGo2N0hSZHdRNjRFb2V5N1U2WUo5VnNyTURvR0ExVWRFUVF6TURHQ0wyeHBZMmgxYm13dFkyRXRabUZpY21sagpMV05oTFdSbGNHeHZlVzFsYm5RdFlqYzVZalZtT1RnMkxYTmtOSFkyTUFvR0NDcUdTTTQ5QkFNQ0EwZ0FNRVVDCklRQ2txc3l2RG52bkJUSEgycDBSSDlwQjlJRFFqd083d0UxODZRWnBRWi9kdVFJZ1c2dW05NXBBSEFjcCs2NlkKOW4xcTVVSzFMQWJRZ0wwWm94OTVNazhZZzU4PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg=="],
        "intermediatecerts": [""]
      }
    }
}" \ No newline at end of file diff --git a/testdata/deploy/peer/secret.yaml b/testdata/deploy/peer/secret.yaml new file mode 100644 index 00000000..41f70909 --- /dev/null +++ b/testdata/deploy/peer/secret.yaml @@ -0,0 +1,27 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: v1 +kind: Secret +metadata: + name: ibppeer1-secret +type: Opaque +data: + couchdbpwd: "YWRtaW4=" + couchdbusr: "YWRtaW4=" + secret.json: "{
    "msp": {
      "component": {
        "keystore": ["LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JR0hBZ0VBTUJNR0J5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEJHMHdhd0lCQVFRZ0h4bDZLTGJEa1NGVXRjZkEKcnRqU3NYNi8zZG9JUTFFQitYSjR5clV4NVhTaFJBTkNBQVFZd3JNNjFxYjliaGhxWVk4ZzVYYVVsakpZTUp2ZgowbUMyZU82WE1zRUFUVnUwS2dCU0ZPcEhvd1pieko4d2pzUnNnS1U0VTdHMDZzMXI1ZW5vbFYxbAotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg=="],
        "signcerts": ["LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM3akNDQXBTZ0F3SUJBZ0lVR3lDRnl1NjcrSWdiaDZWVzQwMUxNOHd5aXhJd0NnWUlLb1pJemowRUF3SXcKYURFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJrd0Z3WURWUVFERXhCbVlXSnlhV010ClkyRXRjMlZ5ZG1WeU1CNFhEVEU0TVRFeE1qQTFNREl3TUZvWERURTVNVEV4TWpBMU1EY3dNRm93ZkRFTE1Ba0cKQTFVRUJoTUNWVk14RnpBVkJnTlZCQWdURGs1dmNuUm9JRU5oY205c2FXNWhNUlF3RWdZRFZRUUtFd3RJZVhCbApjbXhsWkdkbGNqRXVNQXNHQTFVRUN4TUVjR1ZsY2pBTEJnTlZCQXNUQkc5eVp6RXdFZ1lEVlFRTEV3dGtaWEJoCmNuUnRaVzUwTVRFT01Bd0dBMVVFQXhNRmIzSmtaWEl3V1RBVEJnY3Foa2pPUFFJQkJnZ3Foa2pPUFFNQkJ3TkMKQUFRWXdyTTYxcWI5YmhocVlZOGc1WGFVbGpKWU1KdmYwbUMyZU82WE1zRUFUVnUwS2dCU0ZPcEhvd1pieko4dwpqc1JzZ0tVNFU3RzA2czFyNWVub2xWMWxvNElCQmpDQ0FRSXdEZ1lEVlIwUEFRSC9CQVFEQWdlQU1Bd0dBMVVkCkV3RUIvd1FDTUFBd0hRWURWUjBPQkJZRUZCelBpTE9kSW1pN1diRXFMOVFJcUJobGlBLzNNQjhHQTFVZEl3UVkKTUJhQUZPUklReHNOcHNrMHVXYUpUaXJEd29oQUhGRExNRG9HQTFVZEVRUXpNREdDTDJ4cFkyaDFibXd0WTJFdApabUZpY21sakxXTmhMV1JsY0d4dmVXMWxiblF0WWpjNVlqVm1PVGcyTFhOa05IWTJNR1lHQ0NvREJBVUdCd2dCCkJGcDdJbUYwZEhKeklqcDdJbWhtTGtGbVptbHNhV0YwYVc5dUlqb2liM0puTVM1a1pYQmhjblJ0Wlc1ME1TSXMKSW1obUxrVnVjbTlzYkcxbGJuUkpSQ0k2SW05eVpHVnlJaXdpYUdZdVZIbHdaU0k2SW5CbFpYSWlmWDB3Q2dZSQpLb1pJemowRUF3SURTQUF3UlFJaEFJaUd3V2d2VHFwSjVCWlF1c21TMnR1bWN6cHlvTlRDZVZEaGE0ck0xcElVCkFpQksxY0d4eTZ1ZVprNkRYOHFDL1M5dXpHZW5STzZvVitlUVd4ekxiWGhiZGc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg=="],
        "cacerts": ["LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNGakNDQWIyZ0F3SUJBZ0lVUXg1NSt1OFI0QVZGMVBtdmFUaXgvRURVYnl3d0NnWUlLb1pJemowRUF3SXcKYURFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJrd0Z3WURWUVFERXhCbVlXSnlhV010ClkyRXRjMlZ5ZG1WeU1CNFhEVEU0TVRFeE1qQXhOVEl3TUZvWERUTXpNVEV3T0RBeE5USXdNRm93YURFTE1Ba0cKQTFVRUJoTUNWVk14RnpBVkJnTlZCQWdURGs1dmNuUm9JRU5oY205c2FXNWhNUlF3RWdZRFZRUUtFd3RJZVhCbApjbXhsWkdkbGNqRVBNQTBHQTFVRUN4TUdSbUZpY21sak1Sa3dGd1lEVlFRREV4Qm1ZV0p5YVdNdFkyRXRjMlZ5CmRtVnlNRmt3RXdZSEtvWkl6ajBDQVFZSUtvWkl6ajBEQVFjRFFnQUVzOTRSOXgyaHR4MzVRNERYaVc0UXlQaUEKaGRDUjVEb01ueU5iamt0Sjl1T3pZR28rT3ZRdzhpdXFyeTRoNlhBTG9kNnUwQ1pmQWdJNjRUaVEzZkNaeEtORgpNRU13RGdZRFZSMFBBUUgvQkFRREFnRUdNQklHQTFVZEV3RUIvd1FJTUFZQkFmOENBUUV3SFFZRFZSME9CQllFCkZPUklReHNOcHNrMHVXYUpUaXJEd29oQUhGRExNQW9HQ0NxR1NNNDlCQU1DQTBjQU1FUUNJQ3M3NmJDRm1LSm8KSXVhWkF4aUFKMjlvVURRV1I0cmNYUlVjSUh3TTd4NWRBaUFJUDNua0paUFdKMmR6NDZWaEtNdHNsNXE0ZDhyWgpsY0NuOUcrcW1Hc0JoZz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K"],
        "admincerts": ["LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNaVENDQWd1Z0F3SUJBZ0lVVWttbmo3NktVOElaMHVQL3FmYnhPYWtmYjFBd0NnWUlLb1pJemowRUF3SXcKYURFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJrd0Z3WURWUVFERXhCbVlXSnlhV010ClkyRXRjMlZ5ZG1WeU1CNFhEVEU0TVRFeE1qQXlNVEl3TUZvWERURTVNVEV4TWpBeU1UY3dNRm93WFRFTE1Ba0cKQTFVRUJoTUNWVk14RnpBVkJnTlZCQWdURGs1dmNuUm9JRU5oY205c2FXNWhNUlF3RWdZRFZRUUtFd3RJZVhCbApjbXhsWkdkbGNqRVBNQTBHQTFVRUN4TUdZMnhwWlc1ME1RNHdEQVlEVlFRREV3VmhaRzFwYmpCWk1CTUdCeXFHClNNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJBUmFUa05yQ1FoK25lN2YxYWVFdEljRzZ4aFRKbXZORFJiVnVtdSsKUSt4WTJqenNjcHFJSUxLdEdBU3NlV2J2TzltV09WcmRlT1ZLck5kYmdPUWhRZVNqZ1owd2dab3dEZ1lEVlIwUApBUUgvQkFRREFnZUFNQXdHQTFVZEV3RUIvd1FDTUFBd0hRWURWUjBPQkJZRUZPOHg1MUx0a2J2dk1hWmJXZTFmCmwrbmlweHdlTUI4R0ExVWRJd1FZTUJhQUZPUklReHNOcHNrMHVXYUpUaXJEd29oQUhGRExNRG9HQTFVZEVRUXoKTURHQ0wyeHBZMmgxYm13dFkyRXRabUZpY21sakxXTmhMV1JsY0d4dmVXMWxiblF0WWpjNVlqVm1PVGcyTFhOawpOSFkyTUFvR0NDcUdTTTQ5QkFNQ0EwZ0FNRVVDSVFEWDN2dHJvaVNydXZFS09aRzNlT0pWeWQ2N3BMMUI4VUpqCmI4cjlka3VLa3dJZ0tnZlp2clB3WCtpdjI0Uk5vK3JMVlNJUEdUTnE5VVVIK2RkbzEveThHN2M9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K"],
        "intermediatecerts": [""]
      },
      "tls": {
        "keystore": ["LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JR0hBZ0VBTUJNR0J5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEJHMHdhd0lCQVFRZzZXWGJKa2NkR1hjOGVTYlYKQTEzMzdRWUczSXJEaVZTQURwNDBuVW1RSmIyaFJBTkNBQVRabzFtMWt0YTVXZmYyWDZMNG1RN09aVE56NlhLMwpuS1BTYTVucjZuUC9hUjJ1bVM4ZUtqMitBVzYwRXd4VklreGxEVXhsRFpqZVhWZnZMdDRTYWloeAotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg=="],
        "signcerts": ["LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURDRENDQXErZ0F3SUJBZ0lVZExQY3pjd0FaWXdrdTZ1dVplYWNiV2sxYmwwd0NnWUlLb1pJemowRUF3SXcKWkRFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJVd0V3WURWUVFERXd4MGJITmpZUzFqCmIyMXRiMjR3SGhjTk1UZ3hNVEV5TURVd05EQXdXaGNOTVRreE1URXlNRFV3T1RBd1dqQjhNUXN3Q1FZRFZRUUcKRXdKVlV6RVhNQlVHQTFVRUNCTU9UbTl5ZEdnZ1EyRnliMnhwYm1FeEZEQVNCZ05WQkFvVEMwaDVjR1Z5YkdWawpaMlZ5TVM0d0N3WURWUVFMRXdSd1pXVnlNQXNHQTFVRUN4TUViM0puTVRBU0JnTlZCQXNUQzJSbGNHRnlkRzFsCmJuUXhNUTR3REFZRFZRUURFd1Z2Y21SbGNqQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJObWoKV2JXUzFybFo5L1pmb3ZpWkRzNWxNM1BwY3JlY285SnJtZXZxYy85cEhhNlpMeDRxUGI0QmJyUVRERlVpVEdVTgpUR1VObU41ZFYrOHUzaEpxS0hHamdnRWxNSUlCSVRBT0JnTlZIUThCQWY4RUJBTUNBNmd3SFFZRFZSMGxCQll3CkZBWUlLd1lCQlFVSEF3RUdDQ3NHQVFVRkJ3TUNNQXdHQTFVZEV3RUIvd1FDTUFBd0hRWURWUjBPQkJZRUZDdUIKT2pmQ29qMjZXbDZraWtGc2RzbktOalQ0TUI4R0ExVWRJd1FZTUJhQUZPdGM0ajY3SFJkd1E2NEVvZXk3VTZZSgo5VnNyTURvR0ExVWRFUVF6TURHQ0wyeHBZMmgxYm13dFkyRXRabUZpY21sakxXTmhMV1JsY0d4dmVXMWxiblF0CllqYzVZalZtT1RnMkxYTmtOSFkyTUdZR0NDb0RCQVVHQndnQkJGcDdJbUYwZEhKeklqcDdJbWhtTGtGbVptbHMKYVdGMGFXOXVJam9pYjNKbk1TNWtaWEJoY25SdFpXNTBNU0lzSW1obUxrVnVjbTlzYkcxbGJuUkpSQ0k2SW05eQpaR1Z5SWl3aWFHWXVWSGx3WlNJNkluQmxaWElpZlgwd0NnWUlLb1pJemowRUF3SURSd0F3UkFJZ1NoS3VDQ3FoCjlkbVBFUXA5eE55cHA1TDEyOHhBQXIvb2xxeDdtMjcyREgwQ0lIYzFzeFZqeG5TNE1rZ044VnZNZlFvR2lZeUQKK0M1T1ZhdndCeHFhYW5OZgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg=="],
        "cacerts": ["LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNKVENDQWN5Z0F3SUJBZ0lVSlN2WjVvSHZEMmxDdW5qQnphZW02QXpyR1pRd0NnWUlLb1pJemowRUF3SXcKWkRFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJVd0V3WURWUVFERXd4MGJITmpZUzFqCmIyMXRiMjR3SGhjTk1UZ3hNVEV5TURFMU1qQXdXaGNOTXpNeE1UQTRNREUxTWpBd1dqQmtNUXN3Q1FZRFZRUUcKRXdKVlV6RVhNQlVHQTFVRUNCTU9UbTl5ZEdnZ1EyRnliMnhwYm1FeEZEQVNCZ05WQkFvVEMwaDVjR1Z5YkdWawpaMlZ5TVE4d0RRWURWUVFMRXdaR1lXSnlhV014RlRBVEJnTlZCQU1UREhSc2MyTmhMV052YlcxdmJqQlpNQk1HCkJ5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEEwSUFCR0dxVDB4SU5mTXVQRTVqdVFCZ0dzK2tiRzY5WDdMMlBhSm4KQllrRGVGYzdGNGdybFovc0hqcTU4K1FCRStyazFzd3VqQjhrbW9ranhjOWsxN1hPRDllalhEQmFNQTRHQTFVZApEd0VCL3dRRUF3SUJCakFTQmdOVkhSTUJBZjhFQ0RBR0FRSC9BZ0VCTUIwR0ExVWREZ1FXQkJUclhPSSt1eDBYCmNFT3VCS0hzdTFPbUNmVmJLekFWQmdOVkhSRUVEakFNaHdRSkRCTXNod1FLRkZGYU1Bb0dDQ3FHU000OUJBTUMKQTBjQU1FUUNJRmRocFV1UHpWOVBDQmxmZWhkcnBFdHNrUjFaYWRySDM5VU96OUoySnV5NUFpQjU0QnZLdmtaTwpNdVU2bEZMNVFwTEN3TmRCZUlaclNydzhzVlNmNTZEcWVRPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo="],
        "admincerts": ["LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNnRENDQWlhZ0F3SUJBZ0lVTmxySmFWZmtPeWhBVVZvSGNvYVErRGdUWkxJd0NnWUlLb1pJemowRUF3SXcKWkRFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJVd0V3WURWUVFERXd4MGJITmpZUzFqCmIyMXRiMjR3SGhjTk1UZ3hNVEV5TURJeE5qQXdXaGNOTVRreE1URXlNREl5TVRBd1dqQmRNUXN3Q1FZRFZRUUcKRXdKVlV6RVhNQlVHQTFVRUNCTU9UbTl5ZEdnZ1EyRnliMnhwYm1FeEZEQVNCZ05WQkFvVEMwaDVjR1Z5YkdWawpaMlZ5TVE4d0RRWURWUVFMRXdaamJHbGxiblF4RGpBTUJnTlZCQU1UQldGa2JXbHVNRmt3RXdZSEtvWkl6ajBDCkFRWUlLb1pJemowREFRY0RRZ0FFWHBjUkM2cDd5bGwxUzZ3SmVldGVZQnRUR2tzTVorMk9RTGd1UVcrRWlNZ00KajdESXl3aWk0OUtzbmhtTFJLLzdqMndJMmJ4c0xFcDc3cGExNURaR0JxT0J2RENCdVRBT0JnTlZIUThCQWY4RQpCQU1DQTZnd0hRWURWUjBsQkJZd0ZBWUlLd1lCQlFVSEF3RUdDQ3NHQVFVRkJ3TUNNQXdHQTFVZEV3RUIvd1FDCk1BQXdIUVlEVlIwT0JCWUVGRDRIOGpieUhlQTlhY3dDcmU0SVgxamhnQmJJTUI4R0ExVWRJd1FZTUJhQUZPdGMKNGo2N0hSZHdRNjRFb2V5N1U2WUo5VnNyTURvR0ExVWRFUVF6TURHQ0wyeHBZMmgxYm13dFkyRXRabUZpY21sagpMV05oTFdSbGNHeHZlVzFsYm5RdFlqYzVZalZtT1RnMkxYTmtOSFkyTUFvR0NDcUdTTTQ5QkFNQ0EwZ0FNRVVDCklRQ2txc3l2RG52bkJUSEgycDBSSDlwQjlJRFFqd083d0UxODZRWnBRWi9kdVFJZ1c2dW05NXBBSEFjcCs2NlkKOW4xcTVVSzFMQWJRZ0wwWm94OTVNazhZZzU4PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg=="],
        "intermediatecerts": [""]
      }
    }
}" diff --git a/testdata/deploy/role.yaml b/testdata/deploy/role.yaml new file mode 100644 index 00000000..1996bea1 --- /dev/null +++ b/testdata/deploy/role.yaml @@ -0,0 +1,192 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: operator + labels: + release: "operator" +rules: + - apiGroups: + - apiextensions.k8s.io + resources: + - persistentvolumeclaims + - persistentvolumes + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - apiGroups: + - route.openshift.io + resources: + - routes + - routes/custom-host + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection + - apiGroups: + - "" + resources: + - pods + - pods/log + - persistentvolumeclaims + - persistentvolumes + - services + - endpoints + - events + - configmaps + - secrets + - nodes + - serviceaccounts + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection + - apiGroups: + - "batch" + resources: + - jobs + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection + - apiGroups: + - "authorization.openshift.io" + - "rbac.authorization.k8s.io" + resources: + - roles + - rolebindings + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection + - bind + - escalate + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - apiGroups: + - apps + resources: + - deployments + - daemonsets + - replicasets + - statefulsets + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection + - apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - get + - create + - apiGroups: + - apps + resourceNames: + - operator + resources: + - deployments/finalizers + verbs: + - update + - apiGroups: + - ibp.com + resources: + - ibpcas.ibp.com + - ibppeers.ibp.com + - ibporderers.ibp.com + - ibpconsoles.ibp.com + - ibpcas + - ibppeers + - ibporderers + - ibpconsoles + - ibpcas/finalizers + - ibppeers/finalizers + - ibporderers/finalizers + - ibpconsoles/finalizers + - ibpcas/status + - ibppeers/status + - ibporderers/status + - ibpconsoles/status + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection + - apiGroups: + - extensions + - networking.k8s.io + - config.openshift.io + resources: + - ingresses + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection diff --git a/testdata/deploy/role_binding.yaml b/testdata/deploy/role_binding.yaml new file mode 100644 index 00000000..a5388249 --- /dev/null +++ b/testdata/deploy/role_binding.yaml @@ -0,0 +1,30 @@ +# +# Copyright contributors to the 
Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: operator-<<>> +subjects: + - kind: ServiceAccount + name: operator + namespace: <<>> +roleRef: + kind: ClusterRole + name: operator + apiGroup: rbac.authorization.k8s.io diff --git a/testdata/deploy/role_ocp.yaml b/testdata/deploy/role_ocp.yaml new file mode 100644 index 00000000..ebfd58c8 --- /dev/null +++ b/testdata/deploy/role_ocp.yaml @@ -0,0 +1,152 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: operator + labels: + release: "operator" +rules: + - apiGroups: + - apiextensions.k8s.io + resources: + - persistentvolumeclaims + - persistentvolumes + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - apiGroups: + - routes.route.openshift.io + resources: + - routes + - routes/custom-host + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection + - apiGroups: + - "" + resources: + - pods + - pods/log + - services + - endpoints + - persistentvolumeclaims + - persistentvolumes + - events + - configmaps + - secrets + - ingresses + - roles + - rolebindings + - serviceaccounts + - nodes + - jobs + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - apiGroups: + - apps + resources: + - deployments + - daemonsets + - replicasets + - statefulsets + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection + - apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - get + - create + - apiGroups: + - apps + resourceNames: + - operator + resources: + - deployments/finalizers + verbs: + - update + - apiGroups: + - ibp.com + resources: + - ibpcas.ibp.com + - ibppeers.ibp.com + - ibporderers.ibp.com + - ibpconsoles.ibp.com + verbs: + - get + - list + - create + - update + - patch + - watch + - delete + - deletecollection + - apiGroups: + - config.openshift.io + resources: + - v1 + verbs: + - get + - list + - create + - update + - 
patch + - watch + - delete + - deletecollection diff --git a/testdata/deploy/service_account.yaml b/testdata/deploy/service_account.yaml new file mode 100644 index 00000000..972d6bf7 --- /dev/null +++ b/testdata/deploy/service_account.yaml @@ -0,0 +1,22 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: operator diff --git a/testdata/deployercm/deployer-configmap.yaml b/testdata/deployercm/deployer-configmap.yaml new file mode 100644 index 00000000..8b30a3af --- /dev/null +++ b/testdata/deployercm/deployer-configmap.yaml @@ -0,0 +1,185 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +apiVersion: v1 +kind: ConfigMap +metadata: + name: ibpconsole-deployer-template +data: + settings.yaml: | + db: + connectionurl: "http://localhost:5984" + createdb: true + components: + name: "components" + designdocs: + - ./designdocs/components/service_broker.json + - ./designdocs/components/deployer.json + - ./designdocs/components/plutus.json + - ./designdocs/components/search_indices.json + port: 8080 + loglevel: debug + tls: + enabled: false + certpath: /certs/tls.crt + keypath: /certs/tls.key + auth: + username: dev + password: dev123 + + imagePullSecret: "" + usetags: false + versions: + ca: + 1.5.2-6: + default: true + version: 1.5.2-6 + image: + caInitImage: fabric-init + caInitTag: latest + caImage: fabric-ca + caTag: latest + enrollerImage: fabric-enroller + enrollerTag: latest + peer: + 2.2.5-1: + default: true + version: 2.2.5-1 + image: + peerInitImage: fabric-init + peerInitTag: latest + peerImage: fabric-peer + peerTag: latest + couchdbImage: fabric-couchdb + couchdbTag: 3.1.2 + grpcwebImage: fabric-grpcweb + grpcwebTag: latest + + orderer: + 2.2.5-1: + default: true + version: 2.2.5-1 + image: + ordererInitImage: fabric-init + ordererInitTag: latest + ordererImage: fabric-orderer + ordererTag: latest + grpcwebImage: fabric-grpcweb + grpcwebTag: latest + + defaults: + storage: + ca: + ca: + size: 1Gi + class: "" + peer: + statedb: + size: 10Gi + class: "" + peer: + size: 10Gi + class: "" + orderer: + orderer: + size: 10Gi + class: "" + resources: + ca: + ca: + limits: + cpu: 100m + memory: 200M + ephemeral-storage: 1G + requests: + cpu: 100m + memory: 200M + ephemeral-storage: 100M + init: + limits: + cpu: 100m + memory: 200M + ephemeral-storage: 1G + requests: + cpu: 100m + memory: 200M + ephemeral-storage: 100M + peer: + peer: + limits: + cpu: 200m + memory: 400M + requests: + cpu: 200m + memory: 400M + couchdb: + limits: + cpu: 200m + memory: 400M + requests: + cpu: 200m + memory: 400M + proxy: + limits: + cpu: 100m + memory: 200M + requests: + cpu: 100m + memory: 200M + init: + limits: + cpu: 100m + memory: 200M + requests: + cpu: 100m + memory: 200M + chaincodelauncher: + limits: + cpu: 200m + memory: 400M + requests: + cpu: 200m + memory: 400M + orderer: + orderer: + limits: + cpu: 250m + memory: 500M + ephemeral-storage: 1G + requests: + cpu: 250m + memory: 500M + ephemeral-storage: 100M + proxy: + limits: + cpu: 100m + memory: 200M + ephemeral-storage: 1G + requests: + cpu: 100m + memory: 200M + ephemeral-storage: 100M + init: + limits: + cpu: 100m + memory: 200M + ephemeral-storage: 1G + requests: + cpu: 100m + memory: 200M + ephemeral-storage: 100M diff --git a/testdata/init/ca/cert.pem b/testdata/init/ca/cert.pem new file mode 100644 index 00000000..570de548 --- /dev/null +++ b/testdata/init/ca/cert.pem @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDizCCAnOgAwIBAgIJAO9Ho8OPFKlmMA0GCSqGSIb3DQEBCwUAMFExCzAJBgNV +BAYTAlVTMQswCQYDVQQIDAJOQzEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQ +dHkgTHRkMRIwEAYDVQQDDAlsb2NhbGhvc3QwHhcNMTkwNDIyMTcyMTQwWhcNMjAw +NDIxMTcyMTQwWjBRMQswCQYDVQQGEwJVUzELMAkGA1UECAwCTkMxITAfBgNVBAoM +GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDESMBAGA1UEAwwJbG9jYWxob3N0MIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA306qNWOGxOSu9Er4tBUJTISy +XqR2u70gInAW8T7AZYLJsPKJ7CfIVJjgnLnNaSmcoeZNTyj+uUtCU9a6iH7rPlT6 +8iCL7MzHQOdux1qc9943hWRJ1/DAeLe9TrddeO7xSjzzYedUHqQyfGVhr27rrUKl +HUv+h9CkbpMqIS+LvoXr/fMeJTailwD2OdL57RGCVwSdMbVst93d7v0xPiB7Erg3 +37i/m9BUsxPWcfmnalC1gSTaMWCVg2+4les/bLZ8lR2MGUYnuKxHFII0zvYLsFbm 
+oOBh8dYroUFzWQi3WN5D2U9XjdzaLWy6AVaj3KqnU9egvyQoRotIh8kyNxajuwID +AQABo2YwZDAdBgNVHQ4EFgQUVCqYcL0sONnMOeLfSIf3wuVCqA4wHwYDVR0jBBgw +FoAUVCqYcL0sONnMOeLfSIf3wuVCqA4wDAYDVR0TBAUwAwEB/zAUBgNVHREEDTAL +gglsb2NhbGhvc3QwDQYJKoZIhvcNAQELBQADggEBACGy3qUgMrFo83oC8EjJr/Vb +kjavLA/NgtxAli2mYLOC0aq/K0xFEQfYMH+vyom/YH6j479gVxqVQZX3BaUmCTR5 +5r0ck8/FkFzzZ5UHbUzcnRLHUdek0v0VkjvFrhqpOpkXXNGAH1JiWlydB4QvcWIr +Jld3ospXy/Vpi3RvxPdjbR3M9O3ASEsbPvWX4L7HQPbQA4ePXWt4MV1M+nZa8OfP +7IfSMddt9b3x4pvPTIFeBPNzm5eSv1uMHFFi+7TvWt0GFuq/ceFZ7jUTcvyYXx2Z +UncC/tZEh08kf3RD45gzLVrBJAiDsIpCCjSzwYOz87wdgnQhJ10kYooqV9M5NbA= +-----END CERTIFICATE----- diff --git a/testdata/init/ca/key.pem b/testdata/init/ca/key.pem new file mode 100644 index 00000000..4a7f4c30 --- /dev/null +++ b/testdata/init/ca/key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEwAIBADANBgkqhkiG9w0BAQEFAASCBKowggSmAgEAAoIBAQDfTqo1Y4bE5K70 +Svi0FQlMhLJepHa7vSAicBbxPsBlgsmw8onsJ8hUmOCcuc1pKZyh5k1PKP65S0JT +1rqIfus+VPryIIvszMdA527HWpz33jeFZEnX8MB4t71Ot1147vFKPPNh51QepDJ8 +ZWGvbuutQqUdS/6H0KRukyohL4u+hev98x4lNqKXAPY50vntEYJXBJ0xtWy33d3u +/TE+IHsSuDffuL+b0FSzE9Zx+adqULWBJNoxYJWDb7iV6z9stnyVHYwZRie4rEcU +gjTO9guwVuag4GHx1iuhQXNZCLdY3kPZT1eN3NotbLoBVqPcqqdT16C/JChGi0iH +yTI3FqO7AgMBAAECggEBAKqQoX3KGYSG0AOUyEZ00vd5W+ziXjakDMmBKGT4c2iV +74ySlTqmYGQNqXLMkNNEj3kyOda/D2Fk1LLJDw13NCQWMN+EcJxdBczTeVAYLFE/ +kNT4d1bTc5BBd/2KHYFjsTXtPFGJqj0FWfKeVXD0nltmzkuEYlRGz3IOQl26vVi1 +8Ng+YE5FFiJzadHjILajHy80MqaH5QJbcdTDJ0YiwT20xeT1Y9rZLU9B3E3aXZTg +a4hf+oeSLr3e1Y8ludFL6IYMI1LsJO9xefE40a6rfLF+D/10+Dkrqpw5K4k934bT +ZvIZfh/OWLXwGGRhjsXUiZDBWd4KcaAg83MKREed/iECgYEA9d1eC2WLqpZO+nef +8w9eAGJrzmF9ZBdGuOUOJBhx1SIDD80wuaEPD9LFpVqzm+pDFWX6tkxUMG8pa0D7 +XU/SrmkIV535i5FnevIwJtrFRy2KZPvdm8WkuP6AoBwnvKhApu6jBYKIWTXz5KwC +03qRHc38v6icT0GdOVJsctXxdgMCgYEA6IM/NIoGPhk24u2iJGf0rAhHbF/B+6BF +YUgN8Kz61zzODcmWgzDtmNOV/Du+ZF26Q5ar/fNdOeftdqISMZcojLEghUIOG0fv +b7/+WmGQb0O/rthchHTFxv97jRySN/vJqA7EFutDMfPwbeIt5xKEpjoE15Q2X0K3 +Ezpn/eT5aekCgYEA2wPTHrvaap9yw1OEvHRX/GewOSxHEr5ZVaNVsXnFDWM67vyO +Bw4d6K6NOftOO/m2wH0TlQjxhiO/9bbxM/JDbvJJaCNlhJqGX9MWacZALmO3ALYM +dxLc5Z5w3i+2hiwrS4kQ13usEiwpku5XpiNs2ewA1opQk5mTgNiXmsRSUBkCgYEA +w+yUffkweXNVxLLpRjQzACOZy8CE/Tt4Oxk0ZfHdDTG3j/amVbYNABKK+Bh/vqMc +KYf1NfC0Re8hMi4vlooBKUy4UpHuwR5ErK3j7tT3mEpGY1b93r9nSOBPhADnVTVe +H7cRlR55aMZderp0Y5o/HLMXEXFav7q/+fvlVRR09sECgYEA80LefOxtRDo2wjTd +G+GrLCjCjKPXmT2q4J5G0uXNSOB+sZXh42EPl9F/iTUAe8nxDlm+9CnGfh+02Edw +RF76TZmpUwdkyC0KoCwWco3wGB/U9ZY8Amp0ehWdSNLyy8cjuHOUlU04egUaINok +tta0KQibgldQ64lmNE9ct/5igqg= +-----END PRIVATE KEY----- diff --git a/testdata/init/ca/override.yaml b/testdata/init/ca/override.yaml new file mode 100644 index 00000000..d795ca95 --- /dev/null +++ b/testdata/init/ca/override.yaml @@ -0,0 +1,47 @@ +############################################################################# +# This is a configuration file for the fabric-ca-server command. +# +# COMMAND LINE ARGUMENTS AND ENVIRONMENT VARIABLES +# ------------------------------------------------ +# Each configuration element can be overridden via command line +# arguments or environment variables. The precedence for determining +# the value of each element is as follows: +# 1) command line argument +# Examples: +# a) --port 443 +# To set the listening port +# b) --ca.keyfile ../mykey.pem +# To set the "keyfile" element in the "ca" section below; +# note the '.' separator character. 
+# 2) environment variable +# Examples: +# a) FABRIC_CA_SERVER_PORT=443 +# To set the listening port +# b) FABRIC_CA_SERVER_CA_KEYFILE="../mykey.pem" +# To set the "keyfile" element in the "ca" section below; +# note the '_' separator character. +# 3) configuration file +# 4) default value (if there is one) +# All default values are shown beside each element below. +# +# FILE NAME ELEMENTS +# ------------------ +# The value of all fields whose name ends with "file" or "files" are +# name or names of other files. +# For example, see "tls.certfile" and "tls.clientauth.certfiles". +# The value of each of these fields can be a simple filename, a +# relative path, or an absolute path. If the value is not an +# absolute path, it is interpretted as being relative to the location +# of this configuration file. +# +############################################################################# + +db: + type: "postgres" + datasource: "host=localhost port=5432 user=Username password=Password dbname=fabric_ca sslmode=verify-full" + tls: + enabled: true + certfiles: ["LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURBekNDQWV1Z0F3SUJBZ0lKQU9xQ1VmaFNjcWtlTUEwR0NTcUdTSWIzRFFFQkJRVUFNQmd4RmpBVUJnTlYKQkFNTURYQnZjM1JuY21WekxuUmxjM1F3SGhjTk1Ua3dOekl6TVRrd09UVTRXaGNOTWprd056SXdNVGt3T1RVNApXakFZTVJZd0ZBWURWUVFEREExd2IzTjBaM0psY3k1MFpYTjBNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DCkFROEFNSUlCQ2dLQ0FRRUF0UkFQOUx6ZTJkRzVybWtuZy91VW1EQVlTRXBQSWpEV1RQOGpSMzFxQnliNzdhZSsKeTdRNG9Gdmh3WUNVSGxRZVNaMUp5N1RQekRyK2hSTmF0Mlg0Z0ZhSkZiZUVsL0NIcndGTWY3M3NBK1ZXWkd2eQp1eG1uMHZsR1gxbnNISjlpR0hRL2pHYW9XUUljOVVuekdaLzJZK2VmSnE5Z3dwME16YXNZZmR1eit1dUE2WnhUCnlNN0M5YWVabFgvZkxiZWRJdVdPNXNpeE9KVlB5RWlxamR3RGJjUDFjL2ZEK1IybUNuYzdUai9KdUsrWmhMbE8KeEZxWUVGa1E4cGZKL0tjWlptUXVBRFlUWHpEano4Q3FxNFNFTnJLMjRvaFBCQ3ZIaDJqemVaOEZ0ZHgyalJIVAppd0JlZkRhaVJZUFI5Qzh6eTgrVnZaa3pLSFBXc3loQ2I1Qys3UUlEQVFBQm8xQXdUakFkQmdOVkhRNEVGZ1FVCi9mZ01BcExIMXBvcFFoS25KTmgrVk04QUtQZ3dId1lEVlIwakJCZ3dGb0FVL2ZnTUFwTEgxcG9wUWhLbkpOaCsKVk04QUtQZ3dEQVlEVlIwVEJBVXdBd0VCL3pBTkJna3Foa2lHOXcwQkFRVUZBQU9DQVFFQURjOUc4M05LaWw3ZQpoVFlvR1piejhFV1o4c0puVnY4azMwRDlydUY1OXFvT0ppZGorQUhNbzNHOWtud1lvbGFGbmJwb093cElOZ3g1CnYvL21aU3VldlFMZUZKRlN1UjBheVQ1WFYxcjljNUZGQ2JSaEp0cE4rOEdTT29tRUFSYTNBVGVFSG5WeVpaYkMKWkFQQUxMVXlVeUVrSDR3Q0RZUGtYa3dWQVVlR2FGVmNqZWR0eGJ3Z2k0dG0rSFZoTEt5Y0NoZ25YUVhxQ2srTwo2RHJIc0Z0STVTNWQvQlBPbE1Yc28vNUFielBGelpVVVg4OEhkVUhWSWlqM0luMXdUbWhtREtwdzZ6dmcvNjIxCjRhcGhDOWJ2bXAxeUVOUklzb0xiMGlMWVAzRSswU0ZkZC9IRnRhVXV3eUx6cnl4R2xrdG1BVUJWNVdYZEQxMkIKTU1mQnhvNFVYUT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K"] + client: + keyfile: 
"LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb2dJQkFBS0NBUUVBdFJBUDlMemUyZEc1cm1rbmcvdVVtREFZU0VwUElqRFdUUDhqUjMxcUJ5Yjc3YWUrCnk3UTRvRnZod1lDVUhsUWVTWjFKeTdUUHpEcitoUk5hdDJYNGdGYUpGYmVFbC9DSHJ3Rk1mNzNzQStWV1pHdnkKdXhtbjB2bEdYMW5zSEo5aUdIUS9qR2FvV1FJYzlVbnpHWi8yWStlZkpxOWd3cDBNemFzWWZkdXordXVBNlp4VAp5TTdDOWFlWmxYL2ZMYmVkSXVXTzVzaXhPSlZQeUVpcWpkd0RiY1AxYy9mRCtSMm1DbmM3VGovSnVLK1poTGxPCnhGcVlFRmtROHBmSi9LY1pabVF1QURZVFh6RGp6OENxcTRTRU5ySzI0b2hQQkN2SGgyanplWjhGdGR4MmpSSFQKaXdCZWZEYWlSWVBSOUM4enk4K1Z2Wmt6S0hQV3N5aENiNUMrN1FJREFRQUJBb0lCQUZROGhzL2IxdW9Mc3BFOApCdEJXaVVsTWh0K0xBc25yWXFncnd5UU5hdmlzNEdRdXVJdFk2MGRmdCtZb2hjQ2ViZ0RkbG1tWlUxdTJ6cGJtCjdEdUt5MVFaN21rV0dpLytEWUlUM3AxSHBMZ2pTRkFzRUorUFRnN1BQamc2UTZrRlZjUCt3Vm4yb0xmWVRkU28KZE5zbEdxSmNMaVQzVHRMNzhlcjFnTTE5RzN6T3J1ZndrSGJSYU1BRmtvZ1ExUlZLSWpnVGUvbmpIMHFHNW9JagoxNEJLeFFKTUZFTG1pQk50NUx5OVMxWWdxTDRjbmNtUDN5L1QyNEdodVhNckx0eTVOeVhnS0dFZ1pUTDMzZzZvCnYreDFFMFRURWRjMVQvWVBGWkdBSXhHdWRKNWZZZ2JtWU9LZ09mUHZFOE9TbEV6OW56aHNnckVZYjdQVThpZDUKTHFycVJRRUNnWUVBNjIyT3RIUmMxaVY1ZXQxdHQydTVTTTlTS2h2b0lPT3d2Q3NnTEI5dDJzNEhRUlRYN0RXcAo0VDNpUC9leEl5OXI3bTIxNFo5MEgzZlpVNElSUkdHSUxKUVMrYzRQNVA4cHJFTDcyd1dIWlpQTTM3QlZTQ1U3CkxOTXl4TkRjeVdjSUJIVFh4NUY2eXhLNVFXWTg5MVB0eDlDamJFSEcrNVJVdDA4UVlMWDlUQTBDZ1lFQXhPSmYKcXFjeThMOVZyYUFVZG9lbGdIU0NGSkJRR3hMRFNSQlJSTkRIOUJhaWlZOCtwZzd2TExTRXFMRFpsbkZPbFkrQQpiRENEQ0RtdHhwRXViY0x6b3FnOXhlQTZ0eXZZWkNWalY5dXVzNVh1Wmk1VDBBUHhCdm56OHNNa3dRY3RQWkRQCk8zQTN4WllkZzJBRmFrV1BmT1FFbjVaK3F4TU13SG9VZ1ZwQkptRUNnWUJ2Q2FjcTJVOEgrWGpJU0ROOU5TT1kKZ1ovaEdIUnRQcmFXcVVodFJ3MkxDMjFFZHM0NExEOUphdVNSQXdQYThuelhZWXROTk9XU0NmYkllaW9tdEZHRApwUHNtTXRnd1MyQ2VUS0Y0OWF5Y2JnOU0yVi8vdlAraDdxS2RUVjAwNkpGUmVNSms3K3FZYU9aVFFDTTFDN0swCmNXVUNwQ3R6Y014Y0FNQmF2THNRNlFLQmdHbXJMYmxEdjUxaXM3TmFKV0Z3Y0MwL1dzbDZvdVBFOERiNG9RV1UKSUowcXdOV2ZvZm95TGNBS3F1QjIrbkU2SXZrMmFiQ25ZTXc3V0w4b0VJa3NodUtYOVgrTVZ6Y1VPekdVdDNyaQpGeU9mcHJJRXowcm5zcWNSNUJJNUZqTGJqVFpyMEMyUWp2NW5FVFAvaHlpQWFRQ1l5THAyWlVtZ0Vjb0VPNWtwClBhcEJBb0dBZVV0WjE0SVp2cVorQnAxR1VqSG9PR0pQVnlJdzhSRUFETjRhZXRJTUlQRWFVaDdjZUtWdVN6VXMKci9WczA1Zjg0cFBVaStuUTUzaGo2ZFhhYTd1UE1aMFBnNFY4cS9UdzJMZ3BWWndVd0ltZUQrcXNsbldha3VWMQpMSnp3SkhOa3pOWE1OMmJWREFZTndSamNRSmhtbzF0V2xHYlpRQjNoSkEwR2thWGZPa2c9Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==" + certfile: 
"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURBekNDQWV1Z0F3SUJBZ0lKQU9xQ1VmaFNjcWtlTUEwR0NTcUdTSWIzRFFFQkJRVUFNQmd4RmpBVUJnTlYKQkFNTURYQnZjM1JuY21WekxuUmxjM1F3SGhjTk1Ua3dOekl6TVRrd09UVTRXaGNOTWprd056SXdNVGt3T1RVNApXakFZTVJZd0ZBWURWUVFEREExd2IzTjBaM0psY3k1MFpYTjBNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DCkFROEFNSUlCQ2dLQ0FRRUF0UkFQOUx6ZTJkRzVybWtuZy91VW1EQVlTRXBQSWpEV1RQOGpSMzFxQnliNzdhZSsKeTdRNG9Gdmh3WUNVSGxRZVNaMUp5N1RQekRyK2hSTmF0Mlg0Z0ZhSkZiZUVsL0NIcndGTWY3M3NBK1ZXWkd2eQp1eG1uMHZsR1gxbnNISjlpR0hRL2pHYW9XUUljOVVuekdaLzJZK2VmSnE5Z3dwME16YXNZZmR1eit1dUE2WnhUCnlNN0M5YWVabFgvZkxiZWRJdVdPNXNpeE9KVlB5RWlxamR3RGJjUDFjL2ZEK1IybUNuYzdUai9KdUsrWmhMbE8KeEZxWUVGa1E4cGZKL0tjWlptUXVBRFlUWHpEano4Q3FxNFNFTnJLMjRvaFBCQ3ZIaDJqemVaOEZ0ZHgyalJIVAppd0JlZkRhaVJZUFI5Qzh6eTgrVnZaa3pLSFBXc3loQ2I1Qys3UUlEQVFBQm8xQXdUakFkQmdOVkhRNEVGZ1FVCi9mZ01BcExIMXBvcFFoS25KTmgrVk04QUtQZ3dId1lEVlIwakJCZ3dGb0FVL2ZnTUFwTEgxcG9wUWhLbkpOaCsKVk04QUtQZ3dEQVlEVlIwVEJBVXdBd0VCL3pBTkJna3Foa2lHOXcwQkFRVUZBQU9DQVFFQURjOUc4M05LaWw3ZQpoVFlvR1piejhFV1o4c0puVnY4azMwRDlydUY1OXFvT0ppZGorQUhNbzNHOWtud1lvbGFGbmJwb093cElOZ3g1CnYvL21aU3VldlFMZUZKRlN1UjBheVQ1WFYxcjljNUZGQ2JSaEp0cE4rOEdTT29tRUFSYTNBVGVFSG5WeVpaYkMKWkFQQUxMVXlVeUVrSDR3Q0RZUGtYa3dWQVVlR2FGVmNqZWR0eGJ3Z2k0dG0rSFZoTEt5Y0NoZ25YUVhxQ2srTwo2RHJIc0Z0STVTNWQvQlBPbE1Yc28vNUFielBGelpVVVg4OEhkVUhWSWlqM0luMXdUbWhtREtwdzZ6dmcvNjIxCjRhcGhDOWJ2bXAxeUVOUklzb0xiMGlMWVAzRSswU0ZkZC9IRnRhVXV3eUx6cnl4R2xrdG1BVUJWNVdYZEQxMkIKTU1mQnhvNFVYUT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K" diff --git a/testdata/init/orderer/configtx.yaml b/testdata/init/orderer/configtx.yaml new file mode 100644 index 00000000..df1fbe3a --- /dev/null +++ b/testdata/init/orderer/configtx.yaml @@ -0,0 +1,275 @@ +################################################################################ +# +# ORGANIZATIONS +# +# This section defines the organizational identities that can be referenced +# in the configuration profiles. +# +################################################################################ +Organizations: +################################################################################ +# +# CAPABILITIES +# +# This section defines the capabilities of fabric network. This is a new +# concept as of v1.1.0 and should not be utilized in mixed networks with +# v1.0.x peers and orderers. Capabilities define features which must be +# present in a fabric binary for that binary to safely participate in the +# fabric network. For instance, if a new MSP type is added, newer binaries +# might recognize and validate the signatures from this type, while older +# binaries without this support would be unable to validate those +# transactions. This could lead to different versions of the fabric binaries +# having different world states. Instead, defining a capability for a channel +# informs those binaries without this capability that they must cease +# processing transactions until they have been upgraded. For v1.0.x if any +# capabilities are defined (including a map with all capabilities turned off) +# then the v1.0.x peer will deliberately crash. +# +################################################################################ +Capabilities: + # Channel capabilities apply to both the orderers and the peers and must be + # supported by both. + # Set the value of the capability to true to require it. + Channel: &ChannelCapabilities + V1_4_3: true + V1_3: false + V1_1: false + + # Orderer capabilities apply only to the orderers, and may be safely + # used with prior release peers. + # Set the value of the capability to true to require it. 
+ Orderer: &OrdererCapabilities + V1_4_2: true + V1_1: false + + # Application capabilities apply only to the peer network, and may be safely + # used with prior release orderers. + # Set the value of the capability to true to require it. + Application: &ApplicationCapabilities + # V1.4.2 for Application enables the new non-backwards compatible + # features and fixes of fabric v1.4.2 + V1_4_2: true + # V1.3 for Application enables the new non-backwards compatible + # features and fixes of fabric v1.3. + V1_3: false + # V1.2 for Application enables the new non-backwards compatible + # features and fixes of fabric v1.2 (note, this need not be set if + # later version capabilities are set) + V1_2: false + # V1.1 for Application enables the new non-backwards compatible + # features and fixes of fabric v1.1 (note, this need not be set if + # later version capabilities are set). + V1_1: false + +################################################################################ +# +# APPLICATION +# +# This section defines the values to encode into a config transaction or +# genesis block for application-related parameters. +# +################################################################################ +Application: &ApplicationDefaults + ACLs: &ACLsDefault + # This section provides defaults for policies for various resources + # in the system. These "resources" could be functions on system chaincodes + # (e.g., "GetBlockByNumber" on the "qscc" system chaincode) or other resources + # (e.g., who can receive Block events). This section does NOT specify the resource's + # definition or API, but just the ACL policy for it. + # + # Users can override these defaults with their own policy mapping by defining the + # mapping under ACLs in their channel definition + + #---Lifecycle System Chaincode (lscc) function to policy mapping for access control---# + + # ACL policy for lscc's "getid" function + lscc/ChaincodeExists: /Channel/Application/Readers + + # ACL policy for lscc's "getdepspec" function + lscc/GetDeploymentSpec: /Channel/Application/Readers + + # ACL policy for lscc's "getccdata" function + lscc/GetChaincodeData: /Channel/Application/Readers + + # ACL Policy for lscc's "getchaincodes" function + lscc/GetInstantiatedChaincodes: /Channel/Application/Readers + + #---Query System Chaincode (qscc) function to policy mapping for access control---# + + # ACL policy for qscc's "GetChainInfo" function + qscc/GetChainInfo: /Channel/Application/Readers + + # ACL policy for qscc's "GetBlockByNumber" function + qscc/GetBlockByNumber: /Channel/Application/Readers + + # ACL policy for qscc's "GetBlockByHash" function + qscc/GetBlockByHash: /Channel/Application/Readers + + # ACL policy for qscc's "GetTransactionByID" function + qscc/GetTransactionByID: /Channel/Application/Readers + + # ACL policy for qscc's "GetBlockByTxID" function + qscc/GetBlockByTxID: /Channel/Application/Readers + + #---Configuration System Chaincode (cscc) function to policy mapping for access control---# + + # ACL policy for cscc's "GetConfigBlock" function + cscc/GetConfigBlock: /Channel/Application/Readers + + # ACL policy for cscc's "GetConfigTree" function + cscc/GetConfigTree: /Channel/Application/Readers + + # ACL policy for cscc's "SimulateConfigTreeUpdate" function + cscc/SimulateConfigTreeUpdate: /Channel/Application/Readers + + #---Miscellaneous peer function to policy mapping for access control---# + + # ACL policy for invoking chaincodes on peer + peer/Propose: /Channel/Application/Writers + + # ACL policy for
chaincode to chaincode invocation + peer/ChaincodeToChaincode: /Channel/Application/Readers + + #---Events resource to policy mapping for access control###---# + + # ACL policy for sending block events + event/Block: /Channel/Application/Readers + + # ACL policy for sending filtered block events + event/FilteredBlock: /Channel/Application/Readers + + # Organizations lists the orgs participating on the application side of the + # network. + Organizations: + + # Policies defines the set of policies at this level of the config tree + # For Application policies, their canonical path is + # /Channel/Application/ + Policies: &ApplicationDefaultPolicies + Readers: + Type: ImplicitMeta + Rule: "ANY Readers" + Writers: + Type: ImplicitMeta + Rule: "ANY Writers" + Admins: + Type: ImplicitMeta + Rule: "MAJORITY Admins" + + # Capabilities describes the application level capabilities, see the + # dedicated Capabilities section elsewhere in this file for a full + # description + Capabilities: + <<: *ApplicationCapabilities + +################################################################################ +# +# ORDERER +# +# This section defines the values to encode into a config transaction or +# genesis block for orderer related parameters. +# +################################################################################ +Orderer: &OrdererDefaults + + OrdererType: solo + Addresses: + BatchTimeout: 2s + BatchSize: + MaxMessageCount: 500 + AbsoluteMaxBytes: 10 MB + PreferredMaxBytes: 2 MB + MaxChannels: 0 + EtcdRaft: + Consenters: + Options: + TickInterval: 500ms + ElectionTick: 10 + HeartbeatTick: 1 + MaxInflightBlocks: 5 + SnapshotIntervalSize: 20 MB + + Organizations: + + Policies: + Readers: + Type: ImplicitMeta + Rule: "ANY Readers" + Writers: + Type: ImplicitMeta + Rule: "ANY Writers" + Admins: + Type: ImplicitMeta + Rule: "ANY Admins" + # BlockValidation specifies what signatures must be included in the block + # from the orderer for the peer to validate it. + BlockValidation: + Type: ImplicitMeta + Rule: "ANY Writers" + + # Capabilities describes the orderer level capabilities, see the + # dedicated Capabilities section elsewhere in this file for a full + # description + Capabilities: + <<: *OrdererCapabilities + +################################################################################ +# +# CHANNEL +# +# This section defines the values to encode into a config transaction or +# genesis block for channel related parameters. +# +################################################################################ +Channel: &ChannelDefaults + # Policies defines the set of policies at this level of the config tree + # For Channel policies, their canonical path is + # /Channel/ + Policies: + # Who may invoke the 'Deliver' API + Readers: + Type: ImplicitMeta + Rule: "ANY Readers" + # Who may invoke the 'Broadcast' API + Writers: + Type: ImplicitMeta + Rule: "ANY Writers" + # By default, who may modify elements at this config level + Admins: + Type: ImplicitMeta + Rule: "MAJORITY Admins" + + + # Capabilities describes the channel level capabilities, see the + # dedicated Capabilities section elsewhere in this file for a full + # description + Capabilities: + <<: *ChannelCapabilities + +################################################################################ +# +# PROFILES +# +# Different configuration profiles may be encoded here to be specified as +# parameters to the configtxgen tool. The profiles which specify consortiums +# are to be used for generating the orderer genesis block. 
With the correct +# consortium members defined in the orderer genesis block, channel creation +# requests may be generated with only the org member names and a consortium +# name. +# +################################################################################ +Profiles: + Initial: + <<: *ChannelDefaults + Orderer: + <<: *OrdererDefaults + Organizations: + Consortiums: + SampleConsortium: + Organizations: + Channel: + <<: *ChannelDefaults + Consortium: SampleConsortium + Application: + <<: *ApplicationDefaults + Organizations: diff --git a/testdata/init/orderer/msp/cacerts/cert.pem b/testdata/init/orderer/msp/cacerts/cert.pem new file mode 100644 index 00000000..570de548 --- /dev/null +++ b/testdata/init/orderer/msp/cacerts/cert.pem @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDizCCAnOgAwIBAgIJAO9Ho8OPFKlmMA0GCSqGSIb3DQEBCwUAMFExCzAJBgNV +BAYTAlVTMQswCQYDVQQIDAJOQzEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQ +dHkgTHRkMRIwEAYDVQQDDAlsb2NhbGhvc3QwHhcNMTkwNDIyMTcyMTQwWhcNMjAw +NDIxMTcyMTQwWjBRMQswCQYDVQQGEwJVUzELMAkGA1UECAwCTkMxITAfBgNVBAoM +GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDESMBAGA1UEAwwJbG9jYWxob3N0MIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA306qNWOGxOSu9Er4tBUJTISy +XqR2u70gInAW8T7AZYLJsPKJ7CfIVJjgnLnNaSmcoeZNTyj+uUtCU9a6iH7rPlT6 +8iCL7MzHQOdux1qc9943hWRJ1/DAeLe9TrddeO7xSjzzYedUHqQyfGVhr27rrUKl +HUv+h9CkbpMqIS+LvoXr/fMeJTailwD2OdL57RGCVwSdMbVst93d7v0xPiB7Erg3 +37i/m9BUsxPWcfmnalC1gSTaMWCVg2+4les/bLZ8lR2MGUYnuKxHFII0zvYLsFbm +oOBh8dYroUFzWQi3WN5D2U9XjdzaLWy6AVaj3KqnU9egvyQoRotIh8kyNxajuwID +AQABo2YwZDAdBgNVHQ4EFgQUVCqYcL0sONnMOeLfSIf3wuVCqA4wHwYDVR0jBBgw +FoAUVCqYcL0sONnMOeLfSIf3wuVCqA4wDAYDVR0TBAUwAwEB/zAUBgNVHREEDTAL +gglsb2NhbGhvc3QwDQYJKoZIhvcNAQELBQADggEBACGy3qUgMrFo83oC8EjJr/Vb +kjavLA/NgtxAli2mYLOC0aq/K0xFEQfYMH+vyom/YH6j479gVxqVQZX3BaUmCTR5 +5r0ck8/FkFzzZ5UHbUzcnRLHUdek0v0VkjvFrhqpOpkXXNGAH1JiWlydB4QvcWIr +Jld3ospXy/Vpi3RvxPdjbR3M9O3ASEsbPvWX4L7HQPbQA4ePXWt4MV1M+nZa8OfP +7IfSMddt9b3x4pvPTIFeBPNzm5eSv1uMHFFi+7TvWt0GFuq/ceFZ7jUTcvyYXx2Z +UncC/tZEh08kf3RD45gzLVrBJAiDsIpCCjSzwYOz87wdgnQhJ10kYooqV9M5NbA= +-----END CERTIFICATE----- diff --git a/testdata/init/orderer/orderer.yaml b/testdata/init/orderer/orderer.yaml new file mode 100644 index 00000000..a99f8981 --- /dev/null +++ b/testdata/init/orderer/orderer.yaml @@ -0,0 +1,401 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +################################################################################ +# +# Orderer Configuration +# +# - This controls the type and configuration of the orderer. +# +################################################################################ +General: + + # Ledger Type: The ledger type to provide to the orderer. + # Two non-production ledger types are provided for test purposes only: + # - ram: An in-memory ledger whose contents are lost on restart. + # - json: A simple file ledger that writes blocks to disk in JSON format. 
+ # Only one production ledger type is provided: + # - file: A production file-based ledger. + LedgerType: file + + # Listen address: The IP on which to bind to listen. + ListenAddress: 127.0.0.1 + + # Listen port: The port on which to bind to listen. + ListenPort: 7050 + + # TLS: TLS settings for the GRPC server. + TLS: + Enabled: true + # PrivateKey governs the file location of the private key of the TLS certificate. + PrivateKey: tls/server.key + # Certificate governs the file location of the server TLS certificate. + Certificate: tls/server.crt + RootCAs: + - tls/ca.crt + ClientAuthRequired: true + ClientRootCAs: + - tls/client.crt + # Keepalive settings for the GRPC server. + Keepalive: + # ServerMinInterval is the minimum permitted time between client pings. + # If clients send pings more frequently, the server will + # disconnect them. + ServerMinInterval: 60s + # ServerInterval is the time between pings to clients. + ServerInterval: 7200s + # ServerTimeout is the duration the server waits for a response from + # a client before closing the connection. + ServerTimeout: 20s + # Cluster settings for ordering service nodes that communicate with other ordering service nodes + # such as Raft based ordering service. + Cluster: + # SendBufferSize is the maximum number of messages in the egress buffer. + # Consensus messages are dropped if the buffer is full, and transaction + # messages are waiting for space to be freed. + SendBufferSize: 10 + # ClientCertificate governs the file location of the client TLS certificate + # used to establish mutual TLS connections with other ordering service nodes. + ClientCertificate: + # ClientPrivateKey governs the file location of the private key of the client TLS certificate. + ClientPrivateKey: + # The below 4 properties should be either set together, or be unset together. + # If they are set, then the orderer node uses a separate listener for intra-cluster + # communication. If they are unset, then the general orderer listener is used. + # This is useful if you want to use a different TLS server certificates on the + # client-facing and the intra-cluster listeners. + + # ListenPort defines the port on which the cluster listens to connections. + ListenPort: + # ListenAddress defines the IP on which to listen to intra-cluster communication. + ListenAddress: + # ServerCertificate defines the file location of the server TLS certificate used for intra-cluster + # communication. + ServerCertificate: + # ServerPrivateKey defines the file location of the private key of the TLS certificate. + ServerPrivateKey: + # Genesis method: The method by which the genesis block for the orderer + # system channel is specified. Available options are "provisional", "file": + # - provisional: Utilizes a genesis profile, specified by GenesisProfile, + # to dynamically generate a new genesis block. + # - file: Uses the file provided by GenesisFile as the genesis block. + GenesisMethod: provisional + + # Genesis profile: The profile to use to dynamically generate the genesis + # block to use when initializing the orderer system channel and + # GenesisMethod is set to "provisional". See the configtx.yaml file for the + # descriptions of the available profiles. Ignored if GenesisMethod is set to + # "file". + GenesisProfile: SampleInsecureSolo + + # Genesis file: The file containing the genesis block to use when + # initializing the orderer system channel and GenesisMethod is set to + # "file". Ignored if GenesisMethod is set to "provisional". 
+ GenesisFile: genesisblock + + # LocalMSPDir is where to find the private crypto material needed by the + # orderer. It is set relative here as a default for dev environments but + # should be changed to the real location in production. + LocalMSPDir: msp + + # LocalMSPID is the identity to register the local MSP material with the MSP + # manager. IMPORTANT: The local MSP ID of an orderer needs to match the MSP + # ID of one of the organizations defined in the orderer system channel's + # /Channel/Orderer configuration. The sample organization defined in the + # sample configuration provided has an MSP ID of "SampleOrg". + LocalMSPID: SampleOrg + + # Enable an HTTP service for Go "pprof" profiling as documented at: + # https://golang.org/pkg/net/http/pprof + Profile: + Enabled: false + Address: 0.0.0.0:6060 + + # BCCSP configures the blockchain crypto service providers. + BCCSP: + # Default specifies the preferred blockchain crypto service provider + # to use. If the preferred provider is not available, the software + # based provider ("SW") will be used. + # Valid providers are: + # - SW: a software based crypto provider + # - PKCS11: a CA hardware security module crypto provider. + Default: SW + + # SW configures the software based blockchain crypto provider. + SW: + # TODO: The default Hash and Security level needs refactoring to be + # fully configurable. Changing these defaults requires coordination + # SHA2 is hardcoded in several places, not only BCCSP + Hash: SHA2 + Security: 256 + # Location of key store. If this is unset, a location will be + # chosen using: 'LocalMSPDir'/keystore + FileKeyStore: + KeyStore: msp/keystore + + PKCS11: + # Location of the PKCS11 module library + Library: "library1" + # Token Label + Label: "label1" + # User PIN + Pin: "1234" + Hash: SHA2 + Security: 256 + FileKeyStore: + KeyStore: "keystore2" + + # Authentication contains configuration parameters related to authenticating + # client messages + Authentication: + # the acceptable difference between the current server time and the + # client's time as specified in a client request message + TimeWindow: 15m + +################################################################################ +# +# SECTION: File Ledger +# +# - This section applies to the configuration of the file or json ledgers. +# +################################################################################ +FileLedger: + + # Location: The directory to store the blocks in. + # NOTE: If this is unset, a new temporary location will be chosen every time + # the orderer is restarted, using the prefix specified by Prefix. + Location: /var/hyperledger/production/orderer + + # The prefix to use when generating a ledger directory in temporary space. + # Otherwise, this value is ignored. + Prefix: hyperledger-fabric-ordererledger + +################################################################################ +# +# SECTION: RAM Ledger +# +# - This section applies to the configuration of the RAM ledger. +# +################################################################################ +RAMLedger: + + # History Size: The number of blocks that the RAM ledger is set to retain. + # WARNING: Appending a block to the ledger might cause the oldest block in + # the ledger to be dropped in order to limit the number total number blocks + # to HistorySize. For example, if history size is 10, when appending block + # 10, block 0 (the genesis block!) will be dropped to make room for block 10. 
+ HistorySize: 1000 + +################################################################################ +# +# SECTION: Kafka +# +# - This section applies to the configuration of the Kafka-based orderer, and +# its interaction with the Kafka cluster. +# +################################################################################ +Kafka: + + # Retry: What to do if a connection to the Kafka cluster cannot be established, + # or if a metadata request to the Kafka cluster needs to be repeated. + Retry: + # When a new channel is created, or when an existing channel is reloaded + # (in case of a just-restarted orderer), the orderer interacts with the + # Kafka cluster in the following ways: + # 1. It creates a Kafka producer (writer) for the Kafka partition that + # corresponds to the channel. + # 2. It uses that producer to post a no-op CONNECT message to that + # partition + # 3. It creates a Kafka consumer (reader) for that partition. + # If any of these steps fail, they will be re-attempted every + # <ShortInterval> for a total of <ShortTotal>, and then every + # <LongInterval> for a total of <LongTotal> until they succeed. + # Note that the orderer will be unable to write to or read from a + # channel until all of the steps above have been completed successfully. + ShortInterval: 5s + ShortTotal: 10m + LongInterval: 5m + LongTotal: 12h + # Affects the socket timeouts when waiting for an initial connection, a + # response, or a transmission. See Config.Net for more info: + # https://godoc.org/github.com/Shopify/sarama#Config + NetworkTimeouts: + DialTimeout: 10s + ReadTimeout: 10s + WriteTimeout: 10s + # Affects the metadata requests when the Kafka cluster is in the middle + # of a leader election. See Config.Metadata for more info: + # https://godoc.org/github.com/Shopify/sarama#Config + Metadata: + RetryBackoff: 250ms + RetryMax: 3 + # What to do if posting a message to the Kafka cluster fails. See + # Config.Producer for more info: + # https://godoc.org/github.com/Shopify/sarama#Config + Producer: + RetryBackoff: 100ms + RetryMax: 3 + # What to do if reading from the Kafka cluster fails. See + # Config.Consumer for more info: + # https://godoc.org/github.com/Shopify/sarama#Config + Consumer: + RetryBackoff: 2s + # Settings to use when creating Kafka topics. Only applies when + # Kafka.Version is v0.10.1.0 or higher + Topic: + # The number of Kafka brokers across which to replicate the topic + ReplicationFactor: 3 + # Verbose: Enable logging for interactions with the Kafka cluster. + Verbose: false + + # TLS: TLS settings for the orderer's connection to the Kafka cluster. + TLS: + + # Enabled: Use TLS when connecting to the Kafka cluster. + Enabled: false + + # PrivateKey: PEM-encoded private key the orderer will use for + # authentication. + PrivateKey: + # As an alternative to specifying the PrivateKey here, uncomment the + # following "File" key and specify the file name from which to load the + # value of PrivateKey. + #File: path/to/PrivateKey + + # Certificate: PEM-encoded signed public key certificate the orderer will + # use for authentication. + Certificate: + # As an alternative to specifying the Certificate here, uncomment the + # following "File" key and specify the file name from which to load the + # value of Certificate. + #File: path/to/Certificate + + # RootCAs: PEM-encoded trusted root certificates used to validate + # certificates from the Kafka cluster.
+ RootCAs: + # As an alternative to specifying the RootCAs here, uncomment the + # following "File" key and specify the file name from which to load the + # value of RootCAs. + #File: path/to/RootCAs + + # SASLPlain: Settings for using SASL/PLAIN authentication with Kafka brokers + SASLPlain: + # Enabled: Use SASL/PLAIN to authenticate with Kafka brokers + Enabled: false + # User: Required when Enabled is set to true + User: + # Password: Required when Enabled is set to true + Password: + + # Kafka protocol version used to communicate with the Kafka cluster brokers + # (defaults to 0.10.2.0 if not specified) + Version: + +################################################################################ +# +# Debug Configuration +# +# - This controls the debugging options for the orderer +# +################################################################################ +Debug: + + # BroadcastTraceDir when set will cause each request to the Broadcast service + # for this orderer to be written to a file in this directory + BroadcastTraceDir: + + # DeliverTraceDir when set will cause each request to the Deliver service + # for this orderer to be written to a file in this directory + DeliverTraceDir: + +################################################################################ +# +# Operations Configuration +# +# - This configures the operations server endpoint for the orderer +# +################################################################################ +Operations: + # host and port for the operations server + ListenAddress: 127.0.0.1:8443 + + # TLS configuration for the operations endpoint + TLS: + # TLS enabled + Enabled: false + + # Certificate is the location of the PEM encoded TLS certificate + Certificate: + + # PrivateKey points to the location of the PEM-encoded key + PrivateKey: + + # Most operations service endpoints require client authentication when TLS + # is enabled. ClientAuthRequired requires client certificate authentication + # at the TLS layer to access all resources. + ClientAuthRequired: false + + # Paths to PEM encoded ca certificates to trust for client authentication + ClientRootCAs: [] + +################################################################################ +# +# Metrics Configuration +# +# - This configures metrics collection for the orderer +# +################################################################################ +Metrics: + # The metrics provider is one of statsd, prometheus, or disabled + Provider: prometheus + + # The statsd configuration + Statsd: + # network type: tcp or udp + Network: udp + + # the statsd server address + Address: 127.0.0.1:8125 + + # The interval at which locally cached counters and gauges are pushed + # to statsd; timings are pushed immediately + WriteInterval: 30s + + # The prefix is prepended to all emitted statsd metrics + Prefix: + +################################################################################ +# +# Consensus Configuration +# +# - This section contains config options for a consensus plugin. It is opaque +# to orderer, and completely up to consensus implementation to make use of. +# +################################################################################ +Consensus: + # The allowed key-value pairs here depend on consensus plugin. For etcd/raft, + # we use following options: + + # WALDir specifies the location at which Write Ahead Logs for etcd/raft are + # stored. Each channel will have its own subdir named after channel ID. 
+ WALDir: /var/hyperledger/production/orderer/etcdraft/wal + + # SnapDir specifies the location at which snapshots for etcd/raft are + # stored. Each channel will have its own subdir named after channel ID. + SnapDir: /var/hyperledger/production/orderer/etcdraft/snapshot diff --git a/testdata/init/peer/core.yaml b/testdata/init/peer/core.yaml new file mode 100644 index 00000000..de6636cf --- /dev/null +++ b/testdata/init/peer/core.yaml @@ -0,0 +1,708 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +############################################################################### +# +# Peer section +# +############################################################################### +peer: + + # The peer id provides a name for this peer instance and is used when + # naming docker resources. + id: jdoe + + # The networkId allows for logical separation of networks and is used when + # naming docker resources. + networkId: dev + + # The Address at local network interface this Peer will listen on. + # By default, it will listen on all network interfaces + listenAddress: 0.0.0.0:7051 + + # The endpoint this peer uses to listen for inbound chaincode connections. + # If this is commented-out, the listen address is selected to be + # the peer's address (see below) with port 7052 + chaincodeListenAddress: 0.0.0.0:7052 + + # The endpoint the chaincode for this peer uses to connect to the peer. + # If this is not specified, the chaincodeListenAddress address is selected. + # And if chaincodeListenAddress is not specified, address is selected from + # peer listenAddress. + chaincodeAddress: 0.0.0.0:7053 + + # When used as peer config, this represents the endpoint to other peers + # in the same organization. For peers in other organization, see + # gossip.externalEndpoint for more info. + # When used as CLI config, this means the peer's endpoint to interact with + address: 0.0.0.0:7054 + + # Whether the Peer should programmatically determine its address + # This case is useful for docker containers. + addressAutoDetect: true + + # Keepalive settings for peer server and clients + keepalive: + # Interval is the duration after which if the server does not see + # any activity from the client it pings the client to see if it's alive + interval: 7200s + # Timeout is the duration the server waits for a response + # from the client after sending a ping before closing the connection + timeout: 20s + # MinInterval is the minimum permitted time between client pings. + # If clients send pings more frequently, the peer server will + # disconnect them + minInterval: 60s + # Client keepalive settings for communicating with other peer nodes + client: + # Interval is the time between pings to peer nodes. 
This must be + # greater than or equal to the minInterval specified by peer + # nodes + interval: 60s + # Timeout is the duration the client waits for a response from + # peer nodes before closing the connection + timeout: 20s + # DeliveryClient keepalive settings for communication with ordering + # nodes. + deliveryClient: + # Interval is the time between pings to ordering nodes. This must be + # greater than or equal to the minInterval specified by ordering + # nodes. + interval: 60s + # Timeout is the duration the client waits for a response from + # ordering nodes before closing the connection + timeout: 20s + + # Gossip related configuration + gossip: + # Bootstrap set to initialize gossip with. + # This is a list of other peers that this peer reaches out to at startup. + # Important: The endpoints here have to be endpoints of peers in the same + # organization, because the peer would refuse connecting to these endpoints + # unless they are in the same organization as the peer. + bootstrap: + - 127.0.0.1:7051 + - 127.0.0.1:7052 + + # NOTE: orgLeader and useLeaderElection parameters are mutually exclusive. + # Setting both to true would result in the termination of the peer + # since this is undefined state. If the peers are configured with + # useLeaderElection=false, make sure there is at least 1 peer in the + # organization that its orgLeader is set to true. + + # Defines whether peer will initialize dynamic algorithm for + # "leader" selection, where leader is the peer to establish + # connection with ordering service and use delivery protocol + # to pull ledger blocks from ordering service. It is recommended to + # use leader election for large networks of peers. + useLeaderElection: true + # Statically defines peer to be an organization "leader", + # where this means that current peer will maintain connection + # with ordering service and disseminate block across peers in + # its own organization + orgLeader: true + + # Interval for membershipTracker polling + membershipTrackerInterval: 5s + + # Overrides the endpoint that the peer publishes to peers + # in its organization.
For peers in foreign organizations + # see 'externalEndpoint' + endpoint: "endpoint1" + # These need to be overridden with the FQDN of the peer + address: "address1" + externaladdress: "externaladdress1" + # Maximum count of blocks stored in memory + maxBlockCountToStore: 10 + # Max time between consecutive message pushes(unit: millisecond) + maxPropagationBurstLatency: 10ms + # Max number of messages stored until a push is triggered to remote peers + maxPropagationBurstSize: 10 + # Number of times a message is pushed to remote peers + propagateIterations: 1 + # Number of peers selected to push messages to + propagatePeerNum: 3 + # Determines frequency of pull phases(unit: second) + # Must be greater than digestWaitTime + responseWaitTime + pullInterval: 4s + # Number of peers to pull from + pullPeerNum: 3 + # Determines frequency of pulling state info messages from peers(unit: second) + requestStateInfoInterval: 4s + # Determines frequency of pushing state info messages to peers(unit: second) + publishStateInfoInterval: 4s + # Maximum time a stateInfo message is kept until expired + stateInfoRetentionInterval: 2s + # Time from startup certificates are included in Alive messages(unit: second) + publishCertPeriod: 10s + # Should we skip verifying block messages or not (currently not in use) + skipBlockVerification: true + # Dial timeout(unit: second) + dialTimeout: 3s + # Connection timeout(unit: second) + connTimeout: 2s + # Buffer size of received messages + recvBuffSize: 20 + # Buffer size of sending messages + sendBuffSize: 200 + # Time to wait before pull engine processes incoming digests (unit: second) + # Should be slightly smaller than requestWaitTime + digestWaitTime: 1s + # Time to wait before pull engine removes incoming nonce (unit: milliseconds) + # Should be slightly bigger than digestWaitTime + requestWaitTime: 1500ms + # Time to wait before pull engine ends pull (unit: second) + responseWaitTime: 2s + # Alive check interval(unit: second) + aliveTimeInterval: 5s + # Alive expiration timeout(unit: second) + aliveExpirationTimeout: 25s + # Reconnect interval(unit: second) + reconnectInterval: 25s + # This is an endpoint that is published to peers outside of the organization. + # If this isn't set, the peer will not be known to other organizations. + externalEndpoint: "externalEndpoint1" + # Leader election service configuration + election: + # Longest time peer waits for stable membership during leader election startup (unit: second) + startupGracePeriod: 15s + # Interval gossip membership samples to check its stability (unit: second) + membershipSampleInterval: 1s + # Time passes since last declaration message before peer decides to perform leader election (unit: second) + leaderAliveThreshold: 10s + # Time between peer sends propose message and declares itself as a leader (sends declaration message) (unit: second) + leaderElectionDuration: 5s + + pvtData: + # pullRetryThreshold determines the maximum duration of time private data corresponding for a given block + # would be attempted to be pulled from peers until the block would be committed without the private data + pullRetryThreshold: 60s + # As private data enters the transient store, it is associated with the peer's ledger's height at that time. + # transientstoreMaxBlockRetention defines the maximum difference between the current ledger's height upon commit, + # and the private data residing inside the transient store that is guaranteed not to be purged. 
+ # Private data is purged from the transient store when blocks with sequences that are multiples + # of transientstoreMaxBlockRetention are committed. + transientstoreMaxBlockRetention: 1000 + # pushAckTimeout is the maximum time to wait for an acknowledgement from each peer + # at private data push at endorsement time. + pushAckTimeout: 3s + # Block to live pulling margin, used as a buffer + # to prevent peer from trying to pull private data + # from peers that is soon to be purged in next N blocks. + # This helps a newly joined peer catch up to current + # blockchain height quicker. + btlPullMargin: 10 + # the process of reconciliation is done in an endless loop, while in each iteration reconciler tries to + # pull from the other peers the most recent missing blocks with a maximum batch size limitation. + # reconcileBatchSize determines the maximum batch size of missing private data that will be reconciled in a + # single iteration. + reconcileBatchSize: 10 + # reconcileSleepInterval determines the time reconciler sleeps from end of an iteration until the beginning + # of the next reconciliation iteration. + reconcileSleepInterval: 1m + # reconciliationEnabled is a flag that indicates whether private data reconciliation is enable or not. + reconciliationEnabled: true + # skipPullingInvalidTransactionsDuringCommit is a flag that indicates whether pulling of invalid + # transaction's private data from other peers need to be skipped during the commit time and pulled + # only through reconciler. + skipPullingInvalidTransactionsDuringCommit: false + + # Gossip state transfer related configuration + state: + # indicates whenever state transfer is enabled or not + # default value is true, i.e. state transfer is active + # and takes care to sync up missing blocks allowing + # lagging peer to catch up to speed with rest network + enabled: true + # checkInterval interval to check whether peer is lagging behind enough to + # request blocks via state transfer from another peer. + checkInterval: 10s + # responseTimeout amount of time to wait for state transfer response from + # other peers + responseTimeout: 3s + # batchSize the number of blocks to request via state transfer from another peer + batchSize: 10 + # blockBufferSize reflects the size of the re-ordering buffer + # which captures blocks and takes care to deliver them in order + # down to the ledger layer. The actually buffer size is bounded between + # 0 and 2*blockBufferSize, each channel maintains its own buffer + blockBufferSize: 20 + # maxRetries maximum number of re-tries to ask + # for single state transfer request + maxRetries: 3 + + # TLS Settings + tls: + # Require server-side TLS + enabled: false + # Require client certificates / mutual TLS. + # Note that clients that are not configured to use a certificate will + # fail to connect to the peer. + clientAuthRequired: false + # X.509 certificate used for TLS server + cert: + file: tls/server.crt + # Private key used for TLS server (and client if clientAuthEnabled + # is set to true + key: + file: tls/server.key + # Trusted root certificate chain for tls.cert + rootcert: + file: tls/ca.crt + # Set of root certificate authorities used to verify client certificates + clientRootCAs: + files: + - tls/ca.crt + # Private key used for TLS when making client connections. If + # not set, peer.tls.key.file will be used instead + clientKey: + file: + # X.509 certificate used for TLS when making client connections. 
+ # If not set, peer.tls.cert.file will be used instead + clientCert: + file: + + # Authentication contains configuration parameters related to authenticating + # client messages + authentication: + # the acceptable difference between the current server time and the + # client's time as specified in a client request message + timewindow: 15m + + # Path on the file system where peer will store data (eg ledger). This + # location must be access control protected to prevent unintended + # modification that might corrupt the peer operations. + fileSystemPath: /var/hyperledger/production + + # BCCSP (Blockchain crypto provider): Select which crypto implementation or + # library to use + BCCSP: + Default: SW + # Settings for the SW crypto provider (i.e. when DEFAULT: SW) + SW: + # TODO: The default Hash and Security level needs refactoring to be + # fully configurable. Changing these defaults requires coordination + # SHA2 is hardcoded in several places, not only BCCSP + Hash: SHA2 + Security: 256 + # Location of Key Store + FileKeyStore: + # If "", defaults to 'mspConfigPath'/keystore + KeyStore: "keystore1" + # Settings for the PKCS#11 crypto provider (i.e. when DEFAULT: PKCS11) + PKCS11: + # Location of the PKCS11 module library + Library: "library1" + # Token Label + Label: "label1" + # User PIN + Pin: "1234" + Hash: SHA2 + Security: 256 + FileKeyStore: + KeyStore: "keystore2" + + # Path on the file system where peer will find MSP local configurations + mspConfigPath: msp + + # Identifier of the local MSP + # ----!!!!IMPORTANT!!!-!!!IMPORTANT!!!-!!!IMPORTANT!!!!---- + # Deployers need to change the value of the localMspId string. + # In particular, the name of the local MSP ID of a peer needs + # to match the name of one of the MSPs in each of the channel + # that this peer is a member of. Otherwise this peer's messages + # will not be identified as valid by other nodes. + localMspId: SampleOrg + + # CLI common client config options + client: + # connection timeout + connTimeout: 3s + + # Delivery service related config + deliveryclient: + # It sets the total time the delivery service may spend in reconnection + # attempts until its retry logic gives up and returns an error + reconnectTotalTimeThreshold: 3600s + + # It sets the delivery service <-> ordering service node connection timeout + connTimeout: 3s + + # It sets the delivery service maximal delay between consecutive retries + reConnectBackoffThreshold: 3600s + + # Type for the local MSP - by default it's of type bccsp + localMspType: bccsp + + # Used with Go profiling tools only in none production environment. In + # production, it should be disabled (eg enabled: false) + profile: + enabled: false + listenAddress: 0.0.0.0:6060 + + # Handlers defines custom handlers that can filter and mutate + # objects passing within the peer, such as: + # Auth filter - reject or forward proposals from clients + # Decorators - append or mutate the chaincode input passed to the chaincode + # Endorsers - Custom signing over proposal response payload and its mutation + # Valid handler definition contains: + # - A name which is a factory method name defined in + # core/handlers/library/library.go for statically compiled handlers + # - library path to shared object binary for pluggable filters + # Auth filters and decorators are chained and executed in the order that + # they are defined. 
For example: + # authFilters: + # - + # name: FilterOne + # library: /opt/lib/filter.so + # - + # name: FilterTwo + # decorators: + # - + # name: DecoratorOne + # - + # name: DecoratorTwo + # library: /opt/lib/decorator.so + # Endorsers are configured as a map that its keys are the endorsement system chaincodes that are being overridden. + # Below is an example that overrides the default ESCC and uses an endorsement plugin that has the same functionality + # as the default ESCC. + # If the 'library' property is missing, the name is used as the constructor method in the builtin library similar + # to auth filters and decorators. + # endorsers: + # escc: + # name: DefaultESCC + # library: /etc/hyperledger/fabric/plugin/escc.so + handlers: + authFilters: + - + name: DefaultAuth + - + name: ExpirationCheck # This filter checks identity x509 certificate expiration + decorators: + - + name: DefaultDecorator + endorsers: + escc: + name: DefaultEndorsement + library: + validators: + vscc: + name: DefaultValidation + library: + + # library: /etc/hyperledger/fabric/plugin/escc.so + # Number of goroutines that will execute transaction validation in parallel. + # By default, the peer chooses the number of CPUs on the machine. Set this + # variable to override that choice. + # NOTE: overriding this value might negatively influence the performance of + # the peer so please change this value only if you know what you're doing + validatorPoolSize: 5 + + # The discovery service is used by clients to query information about peers, + # such as - which peers have joined a certain channel, what is the latest + # channel config, and most importantly - given a chaincode and a channel, + # what possible sets of peers satisfy the endorsement policy. + discovery: + enabled: true + # Whether the authentication cache is enabled or not. + authCacheEnabled: true + # The maximum size of the cache, after which a purge takes place + authCacheMaxSize: 1000 + # The proportion (0 to 1) of entries that remain in the cache after the cache is purged due to overpopulation + authCachePurgeRetentionRatio: 0.75 + # Whether to allow non-admins to perform non channel scoped queries. + # When this is false, it means that only peer admins can perform non channel scoped queries. + orgMembersAllowedAccess: true + + # Limits is used to configure some internal resource limits. + limits: + # Concurrency limits the number of concurrently running system chaincode requests. + # This option is only supported for qscc at this time. + concurrency: + qscc: 5000 + +############################################################################### +# +# VM section +# +############################################################################### +vm: + + # Endpoint of the vm management system. For docker can be one of the following in general + # unix:///var/run/docker.sock + # http://localhost:2375 + # https://localhost:2376 + endpoint: unix:///var/run/docker.sock + + # settings for docker vms + docker: + tls: + enabled: false + ca: + file: docker/ca.crt + cert: + file: docker/tls.crt + key: + file: docker/tls.key + + # Enables/disables the standard out/err from chaincode containers for + # debugging purposes + attachStdout: false + + # Parameters on creating docker container. + # Container may be efficiently created using ipam & dns-server for cluster + # NetworkMode - sets the networking mode for the container. Supported + # standard values are: `host`(default),`bridge`,`ipvlan`,`none`. + # Dns - a list of DNS servers for the container to use. 
+ # Note: `Privileged` `Binds` `Links` and `PortBindings` properties of + # Docker Host Config are not supported and will not be used if set. + # LogConfig - sets the logging driver (Type) and related options + # (Config) for Docker. For more info, + # https://docs.docker.com/engine/admin/logging/overview/ + # Note: Set LogConfig using Environment Variables is not supported. + hostConfig: + NetworkMode: host + Dns: + # - 192.168.0.1 + # NEVER UNCOMMENT THIS + # LogConfig: + # Type: json-file + # Config: + # max-size: "50m" + # max-file: "5" + Memory: 2147483648 + +############################################################################### +# +# Chaincode section +# +############################################################################### +chaincode: + + # The id is used by the Chaincode stub to register the executing Chaincode + # ID with the Peer and is generally supplied through ENV variables + # the `path` form of ID is provided when installing the chaincode. + # The `name` is used for all other requests and can be any string. + id: + path: + name: + + # Generic builder environment, suitable for most chaincode types + builder: $(DOCKER_NS)/fabric-ccenv:$(PROJECT_VERSION) + + # Enables/disables force pulling of the base docker images (listed below) + # during user chaincode instantiation. + # Useful when using moving image tags (such as :latest) + pull: false + + golang: + # golang will never need more than baseos + runtime: $(DOCKER_NS)/fabric-baseos:$(PROJECT_VERSION) + + # whether or not golang chaincode should be linked dynamically + dynamicLink: false + + java: + # This is an image based on java:openjdk-8 with addition compiler + # tools added for java shim layer packaging. + # This image is packed with shim layer libraries that are necessary + # for Java chaincode runtime. + runtime: $(DOCKER_NS)/fabric-javaenv:latest + + node: + # This is an image based on node:$(NODE_VER)-alpine + runtime: $(DOCKER_NS)/fabric-nodeenv:latest + + # List of directories to treat as external builders and launchers for + # chaincode. The external builder detection processing will iterate over the + # builders in the order specified below. + externalBuilders: [] + + # Timeout duration for starting up a container and waiting for Register + # to come through. 1sec should be plenty for chaincode unit tests + startuptimeout: 300s + + # Timeout duration for Invoke and Init calls to prevent runaway. + # This timeout is used by all chaincodes in all the channels, including + # system chaincodes. + # Note that during Invoke, if the image is not available (e.g. being + # cleaned up when in development environment), the peer will automatically + # build the image, which might take more time. In production environment, + # the chaincode image is unlikely to be deleted, so the timeout could be + # reduced accordingly. + executetimeout: 30s + + # There are 2 modes: "dev" and "net". + # In dev mode, user runs the chaincode after starting peer from + # command line on local machine. + # In net mode, peer will run chaincode in a docker container. + mode: net + + # keepalive in seconds. In situations where the communiction goes through a + # proxy that does not support keep-alive, this parameter will maintain connection + # between peer and chaincode. + # A value <= 0 turns keepalive off + keepalive: 0 + + # system chaincodes whitelist. 
To add system chaincode "myscc" to the + # whitelist, add "myscc: enable" to the list below, and register in + # chaincode/importsysccs.go + system: + _lifecycle: enable + cscc: enable + lscc: enable + escc: enable + vscc: enable + qscc: enable + + # Logging section for the chaincode container + logging: + # Default level for all loggers within the chaincode container + level: info + # Override default level for the 'shim' logger + shim: warning + # Format for the chaincode container logs + format: '%{color}%{time:2006-01-02 15:04:05.000 MST} [%{module}] %{shortfunc} -> %{level:.4s} %{id:03x}%{color:reset} %{message}' + +############################################################################### +# +# Ledger section - ledger configuration encompasses both the blockchain +# and the state +# +############################################################################### +ledger: + + blockchain: + + state: + # stateDatabase - options are "goleveldb", "CouchDB" + # goleveldb - default state database stored in goleveldb. + # CouchDB - store state database in CouchDB + stateDatabase: goleveldb + # Limit on the number of records to return per query + totalQueryLimit: 100000 + couchDBConfig: + # It is recommended to run CouchDB on the same server as the peer, and + # not map the CouchDB container port to a server port in docker-compose. + # Otherwise proper security must be provided on the connection between + # CouchDB client (on the peer) and server. + couchDBAddress: 127.0.0.1:5984 + # This username must have read and write authority on CouchDB + username: + # The password is recommended to pass as an environment variable + # during start up (eg CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD). + # If it is stored here, the file must be access control protected + # to prevent unintended users from discovering the password. + password: + # Number of retries for CouchDB errors + maxRetries: 3 + # Number of retries for CouchDB errors during peer startup + maxRetriesOnStartup: 12 + # CouchDB request timeout (unit: duration, e.g. 20s) + requestTimeout: 35s + # Limit on the number of records per each CouchDB query + # Note that chaincode queries are only bound by totalQueryLimit. + # Internally the chaincode may execute multiple CouchDB queries, + # each of size internalQueryLimit. + internalQueryLimit: 1000 + # Limit on the number of records per CouchDB bulk update batch + maxBatchUpdateSize: 1000 + # Warm indexes after every N blocks. + # This option warms any indexes that have been + # deployed to CouchDB after every N blocks. + # A value of 1 will warm indexes after every block commit, + # to ensure fast selector queries. + # Increasing the value may improve write efficiency of peer and CouchDB, + # but may degrade query response time. + warmIndexesAfterNBlocks: 1 + # Create the _global_changes system database + # This is optional. Creating the global changes database will require + # additional system resources to track changes and maintain the database + createGlobalChangesDB: false + + history: + # enableHistoryDatabase - options are true or false + # Indicates if the history of key updates should be stored. + # All history 'index' will be stored in goleveldb, regardless if using + # CouchDB or alternate database for the state. 
+ enableHistoryDatabase: true + + pvtdataStore: + # the maximum db batch size for converting + # the ineligible missing data entries to eligible missing data entries + collElgProcMaxDbBatchSize: 5000 + # the minimum duration (in milliseconds) between writing + # two consecutive db batches for converting the ineligible missing data entries to eligible missing data entries + collElgProcDbBatchesInterval: 1000 + +############################################################################### +# +# Operations section +# +############################################################################### +operations: + # host and port for the operations server + listenAddress: 127.0.0.1:9443 + + # TLS configuration for the operations endpoint + tls: + # TLS enabled + enabled: false + + # path to PEM encoded server certificate for the operations server + cert: + file: "cert.pem" + + # path to PEM encoded server key for the operations server + key: + file: "key.pem" + + # most operations service endpoints require client authentication when TLS + # is enabled. clientAuthRequired requires client certificate authentication + # at the TLS layer to access all resources. + clientAuthRequired: false + + # paths to PEM encoded ca certificates to trust for client authentication + clientRootCAs: + files: + - "rootcert.pem" + +############################################################################### +# +# Metrics section +# +############################################################################### +metrics: + # metrics provider is one of statsd, prometheus, or disabled + provider: prometheus + + # statsd configuration + statsd: + # network type: tcp or udp + network: udp + + # statsd server address + address: 127.0.0.1:8125 + + # the interval at which locally cached counters and gauges are pushed + # to statsd; timings are pushed immediately + writeInterval: 10s + + # prefix is prepended to all emitted statsd metrics + prefix: diff --git a/testdata/init/peer/core_bootstrap_test.yaml b/testdata/init/peer/core_bootstrap_test.yaml new file mode 100644 index 00000000..b92b0d18 --- /dev/null +++ b/testdata/init/peer/core_bootstrap_test.yaml @@ -0,0 +1,706 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +############################################################################### +# +# Peer section +# +############################################################################### +peer: + + # The peer id provides a name for this peer instance and is used when + # naming docker resources. + id: jdoe + + # The networkId allows for logical separation of networks and is used when + # naming docker resources. + networkId: dev + + # The Address at local network interface this Peer will listen on. + # By default, it will listen on all network interfaces + listenAddress: 0.0.0.0:7051 + + # The endpoint this peer uses to listen for inbound chaincode connections. 
+ # If this is commented-out, the listen address is selected to be + # the peer's address (see below) with port 7052 + chaincodeListenAddress: 0.0.0.0:7052 + + # The endpoint the chaincode for this peer uses to connect to the peer. + # If this is not specified, the chaincodeListenAddress address is selected. + # And if chaincodeListenAddress is not specified, address is selected from + # peer listenAddress. + chaincodeAddress: 0.0.0.0:7053 + + # When used as peer config, this represents the endpoint to other peers + # in the same organization. For peers in other organization, see + # gossip.externalEndpoint for more info. + # When used as CLI config, this means the peer's endpoint to interact with + address: 0.0.0.0:7054 + + # Whether the Peer should programmatically determine its address + # This case is useful for docker containers. + addressAutoDetect: true + + # Keepalive settings for peer server and clients + keepalive: + # Interval is the duration after which if the server does not see + # any activity from the client it pings the client to see if it's alive + interval: 7200s + # Timeout is the duration the server waits for a response + # from the client after sending a ping before closing the connection + timeout: 20s + # MinInterval is the minimum permitted time between client pings. + # If clients send pings more frequently, the peer server will + # disconnect them + minInterval: 60s + # Client keepalive settings for communicating with other peer nodes + client: + # Interval is the time between pings to peer nodes. This must + # greater than or equal to the minInterval specified by peer + # nodes + interval: 60s + # Timeout is the duration the client waits for a response from + # peer nodes before closing the connection + timeout: 20s + # DeliveryClient keepalive settings for communication with ordering + # nodes. + deliveryClient: + # Interval is the time between pings to ordering nodes. This must + # greater than or equal to the minInterval specified by ordering + # nodes. + interval: 60s + # Timeout is the duration the client waits for a response from + # ordering nodes before closing the connection + timeout: 20s + + # Gossip related configuration + gossip: + # Bootstrap set to initialize gossip with. + # This is a list of other peers that this peer reaches out to at startup. + # Important: The endpoints here have to be endpoints of peers in the same + # organization, because the peer would refuse connecting to these endpoints + # unless they are in the same organization as the peer. + bootstrap: 127.0.0.1:7051 + + # NOTE: orgLeader and useLeaderElection parameters are mutual exclusive. + # Setting both to true would result in the termination of the peer + # since this is undefined state. If the peers are configured with + # useLeaderElection=false, make sure there is at least 1 peer in the + # organization that its orgLeader is set to true. + + # Defines whenever peer will initialize dynamic algorithm for + # "leader" selection, where leader is the peer to establish + # connection with ordering service and use delivery protocol + # to pull ledger blocks from ordering service. It is recommended to + # use leader election for large networks of peers. 
+ useLeaderElection: true + # Statically defines peer to be an organization "leader", + # where this means that current peer will maintain connection + # with ordering service and disseminate block across peers in + # its own organization + orgLeader: true + + # Interval for membershipTracker polling + membershipTrackerInterval: 5s + + # Overrides the endpoint that the peer publishes to peers + # in its organization. For peers in foreign organizations + # see 'externalEndpoint' + endpoint: "endpoint1" + # These need to be overridden with the FQDN of the peer + address: "address1" + externaladdress: "externaladdress1" + # Maximum count of blocks stored in memory + maxBlockCountToStore: 10 + # Max time between consecutive message pushes(unit: millisecond) + maxPropagationBurstLatency: 10ms + # Max number of messages stored until a push is triggered to remote peers + maxPropagationBurstSize: 10 + # Number of times a message is pushed to remote peers + propagateIterations: 1 + # Number of peers selected to push messages to + propagatePeerNum: 3 + # Determines frequency of pull phases(unit: second) + # Must be greater than digestWaitTime + responseWaitTime + pullInterval: 4s + # Number of peers to pull from + pullPeerNum: 3 + # Determines frequency of pulling state info messages from peers(unit: second) + requestStateInfoInterval: 4s + # Determines frequency of pushing state info messages to peers(unit: second) + publishStateInfoInterval: 4s + # Maximum time a stateInfo message is kept until expired + stateInfoRetentionInterval: 2s + # Time from startup certificates are included in Alive messages(unit: second) + publishCertPeriod: 10s + # Should we skip verifying block messages or not (currently not in use) + skipBlockVerification: true + # Dial timeout(unit: second) + dialTimeout: 3s + # Connection timeout(unit: second) + connTimeout: 2s + # Buffer size of received messages + recvBuffSize: 20 + # Buffer size of sending messages + sendBuffSize: 200 + # Time to wait before pull engine processes incoming digests (unit: second) + # Should be slightly smaller than requestWaitTime + digestWaitTime: 1s + # Time to wait before pull engine removes incoming nonce (unit: milliseconds) + # Should be slightly bigger than digestWaitTime + requestWaitTime: 1500ms + # Time to wait before pull engine ends pull (unit: second) + responseWaitTime: 2s + # Alive check interval(unit: second) + aliveTimeInterval: 5s + # Alive expiration timeout(unit: second) + aliveExpirationTimeout: 25s + # Reconnect interval(unit: second) + reconnectInterval: 25s + # This is an endpoint that is published to peers outside of the organization. + # If this isn't set, the peer will not be known to other organizations. 
+ externalEndpoint: "externalEndpoint1" + # Leader election service configuration + election: + # Longest time peer waits for stable membership during leader election startup (unit: second) + startupGracePeriod: 15s + # Interval gossip membership samples to check its stability (unit: second) + membershipSampleInterval: 1s + # Time passes since last declaration message before peer decides to perform leader election (unit: second) + leaderAliveThreshold: 10s + # Time between peer sends propose message and declares itself as a leader (sends declaration message) (unit: second) + leaderElectionDuration: 5s + + pvtData: + # pullRetryThreshold determines the maximum duration of time private data corresponding for a given block + # would be attempted to be pulled from peers until the block would be committed without the private data + pullRetryThreshold: 60s + # As private data enters the transient store, it is associated with the peer's ledger's height at that time. + # transientstoreMaxBlockRetention defines the maximum difference between the current ledger's height upon commit, + # and the private data residing inside the transient store that is guaranteed not to be purged. + # Private data is purged from the transient store when blocks with sequences that are multiples + # of transientstoreMaxBlockRetention are committed. + transientstoreMaxBlockRetention: 1000 + # pushAckTimeout is the maximum time to wait for an acknowledgement from each peer + # at private data push at endorsement time. + pushAckTimeout: 3s + # Block to live pulling margin, used as a buffer + # to prevent peer from trying to pull private data + # from peers that is soon to be purged in next N blocks. + # This helps a newly joined peer catch up to current + # blockchain height quicker. + btlPullMargin: 10 + # the process of reconciliation is done in an endless loop, while in each iteration reconciler tries to + # pull from the other peers the most recent missing blocks with a maximum batch size limitation. + # reconcileBatchSize determines the maximum batch size of missing private data that will be reconciled in a + # single iteration. + reconcileBatchSize: 10 + # reconcileSleepInterval determines the time reconciler sleeps from end of an iteration until the beginning + # of the next reconciliation iteration. + reconcileSleepInterval: 1m + # reconciliationEnabled is a flag that indicates whether private data reconciliation is enable or not. + reconciliationEnabled: true + # skipPullingInvalidTransactionsDuringCommit is a flag that indicates whether pulling of invalid + # transaction's private data from other peers need to be skipped during the commit time and pulled + # only through reconciler. + skipPullingInvalidTransactionsDuringCommit: false + + # Gossip state transfer related configuration + state: + # indicates whenever state transfer is enabled or not + # default value is true, i.e. state transfer is active + # and takes care to sync up missing blocks allowing + # lagging peer to catch up to speed with rest network + enabled: true + # checkInterval interval to check whether peer is lagging behind enough to + # request blocks via state transfer from another peer. 
+ checkInterval: 10s + # responseTimeout amount of time to wait for state transfer response from + # other peers + responseTimeout: 3s + # batchSize the number of blocks to request via state transfer from another peer + batchSize: 10 + # blockBufferSize reflects the size of the re-ordering buffer + # which captures blocks and takes care to deliver them in order + # down to the ledger layer. The actually buffer size is bounded between + # 0 and 2*blockBufferSize, each channel maintains its own buffer + blockBufferSize: 20 + # maxRetries maximum number of re-tries to ask + # for single state transfer request + maxRetries: 3 + + # TLS Settings + tls: + # Require server-side TLS + enabled: false + # Require client certificates / mutual TLS. + # Note that clients that are not configured to use a certificate will + # fail to connect to the peer. + clientAuthRequired: false + # X.509 certificate used for TLS server + cert: + file: tls/server.crt + # Private key used for TLS server (and client if clientAuthEnabled + # is set to true + key: + file: tls/server.key + # Trusted root certificate chain for tls.cert + rootcert: + file: tls/ca.crt + # Set of root certificate authorities used to verify client certificates + clientRootCAs: + files: + - tls/ca.crt + # Private key used for TLS when making client connections. If + # not set, peer.tls.key.file will be used instead + clientKey: + file: + # X.509 certificate used for TLS when making client connections. + # If not set, peer.tls.cert.file will be used instead + clientCert: + file: + + # Authentication contains configuration parameters related to authenticating + # client messages + authentication: + # the acceptable difference between the current server time and the + # client's time as specified in a client request message + timewindow: 15m + + # Path on the file system where peer will store data (eg ledger). This + # location must be access control protected to prevent unintended + # modification that might corrupt the peer operations. + fileSystemPath: /var/hyperledger/production + + # BCCSP (Blockchain crypto provider): Select which crypto implementation or + # library to use + BCCSP: + Default: SW + # Settings for the SW crypto provider (i.e. when DEFAULT: SW) + SW: + # TODO: The default Hash and Security level needs refactoring to be + # fully configurable. Changing these defaults requires coordination + # SHA2 is hardcoded in several places, not only BCCSP + Hash: SHA2 + Security: 256 + # Location of Key Store + FileKeyStore: + # If "", defaults to 'mspConfigPath'/keystore + KeyStore: "keystore1" + # Settings for the PKCS#11 crypto provider (i.e. when DEFAULT: PKCS11) + PKCS11: + # Location of the PKCS11 module library + Library: "library1" + # Token Label + Label: "label1" + # User PIN + Pin: "1234" + Hash: SHA2 + Security: 256 + FileKeyStore: + KeyStore: "keystore2" + + # Path on the file system where peer will find MSP local configurations + mspConfigPath: msp + + # Identifier of the local MSP + # ----!!!!IMPORTANT!!!-!!!IMPORTANT!!!-!!!IMPORTANT!!!!---- + # Deployers need to change the value of the localMspId string. + # In particular, the name of the local MSP ID of a peer needs + # to match the name of one of the MSPs in each of the channel + # that this peer is a member of. Otherwise this peer's messages + # will not be identified as valid by other nodes. 
+ localMspId: SampleOrg + + # CLI common client config options + client: + # connection timeout + connTimeout: 3s + + # Delivery service related config + deliveryclient: + # It sets the total time the delivery service may spend in reconnection + # attempts until its retry logic gives up and returns an error + reconnectTotalTimeThreshold: 3600s + + # It sets the delivery service <-> ordering service node connection timeout + connTimeout: 3s + + # It sets the delivery service maximal delay between consecutive retries + reConnectBackoffThreshold: 3600s + + # Type for the local MSP - by default it's of type bccsp + localMspType: bccsp + + # Used with Go profiling tools only in none production environment. In + # production, it should be disabled (eg enabled: false) + profile: + enabled: false + listenAddress: 0.0.0.0:6060 + + # Handlers defines custom handlers that can filter and mutate + # objects passing within the peer, such as: + # Auth filter - reject or forward proposals from clients + # Decorators - append or mutate the chaincode input passed to the chaincode + # Endorsers - Custom signing over proposal response payload and its mutation + # Valid handler definition contains: + # - A name which is a factory method name defined in + # core/handlers/library/library.go for statically compiled handlers + # - library path to shared object binary for pluggable filters + # Auth filters and decorators are chained and executed in the order that + # they are defined. For example: + # authFilters: + # - + # name: FilterOne + # library: /opt/lib/filter.so + # - + # name: FilterTwo + # decorators: + # - + # name: DecoratorOne + # - + # name: DecoratorTwo + # library: /opt/lib/decorator.so + # Endorsers are configured as a map that its keys are the endorsement system chaincodes that are being overridden. + # Below is an example that overrides the default ESCC and uses an endorsement plugin that has the same functionality + # as the default ESCC. + # If the 'library' property is missing, the name is used as the constructor method in the builtin library similar + # to auth filters and decorators. + # endorsers: + # escc: + # name: DefaultESCC + # library: /etc/hyperledger/fabric/plugin/escc.so + handlers: + authFilters: + - + name: DefaultAuth + - + name: ExpirationCheck # This filter checks identity x509 certificate expiration + decorators: + - + name: DefaultDecorator + endorsers: + escc: + name: DefaultEndorsement + library: + validators: + vscc: + name: DefaultValidation + library: + + # library: /etc/hyperledger/fabric/plugin/escc.so + # Number of goroutines that will execute transaction validation in parallel. + # By default, the peer chooses the number of CPUs on the machine. Set this + # variable to override that choice. + # NOTE: overriding this value might negatively influence the performance of + # the peer so please change this value only if you know what you're doing + validatorPoolSize: 5 + + # The discovery service is used by clients to query information about peers, + # such as - which peers have joined a certain channel, what is the latest + # channel config, and most importantly - given a chaincode and a channel, + # what possible sets of peers satisfy the endorsement policy. + discovery: + enabled: true + # Whether the authentication cache is enabled or not. 
+ authCacheEnabled: true + # The maximum size of the cache, after which a purge takes place + authCacheMaxSize: 1000 + # The proportion (0 to 1) of entries that remain in the cache after the cache is purged due to overpopulation + authCachePurgeRetentionRatio: 0.75 + # Whether to allow non-admins to perform non channel scoped queries. + # When this is false, it means that only peer admins can perform non channel scoped queries. + orgMembersAllowedAccess: true + + # Limits is used to configure some internal resource limits. + limits: + # Concurrency limits the number of concurrently running system chaincode requests. + # This option is only supported for qscc at this time. + concurrency: + qscc: 5000 + +############################################################################### +# +# VM section +# +############################################################################### +vm: + + # Endpoint of the vm management system. For docker can be one of the following in general + # unix:///var/run/docker.sock + # http://localhost:2375 + # https://localhost:2376 + endpoint: unix:///var/run/docker.sock + + # settings for docker vms + docker: + tls: + enabled: false + ca: + file: docker/ca.crt + cert: + file: docker/tls.crt + key: + file: docker/tls.key + + # Enables/disables the standard out/err from chaincode containers for + # debugging purposes + attachStdout: false + + # Parameters on creating docker container. + # Container may be efficiently created using ipam & dns-server for cluster + # NetworkMode - sets the networking mode for the container. Supported + # standard values are: `host`(default),`bridge`,`ipvlan`,`none`. + # Dns - a list of DNS servers for the container to use. + # Note: `Privileged` `Binds` `Links` and `PortBindings` properties of + # Docker Host Config are not supported and will not be used if set. + # LogConfig - sets the logging driver (Type) and related options + # (Config) for Docker. For more info, + # https://docs.docker.com/engine/admin/logging/overview/ + # Note: Set LogConfig using Environment Variables is not supported. + hostConfig: + NetworkMode: host + Dns: + # - 192.168.0.1 + # NEVER UNCOMMENT THIS + # LogConfig: + # Type: json-file + # Config: + # max-size: "50m" + # max-file: "5" + Memory: 2147483648 + +############################################################################### +# +# Chaincode section +# +############################################################################### +chaincode: + + # The id is used by the Chaincode stub to register the executing Chaincode + # ID with the Peer and is generally supplied through ENV variables + # the `path` form of ID is provided when installing the chaincode. + # The `name` is used for all other requests and can be any string. + id: + path: + name: + + # Generic builder environment, suitable for most chaincode types + builder: $(DOCKER_NS)/fabric-ccenv:$(PROJECT_VERSION) + + # Enables/disables force pulling of the base docker images (listed below) + # during user chaincode instantiation. + # Useful when using moving image tags (such as :latest) + pull: false + + golang: + # golang will never need more than baseos + runtime: $(DOCKER_NS)/fabric-baseos:$(PROJECT_VERSION) + + # whether or not golang chaincode should be linked dynamically + dynamicLink: false + + java: + # This is an image based on java:openjdk-8 with addition compiler + # tools added for java shim layer packaging. + # This image is packed with shim layer libraries that are necessary + # for Java chaincode runtime. 
+ runtime: $(DOCKER_NS)/fabric-javaenv:latest + + node: + # This is an image based on node:$(NODE_VER)-alpine + runtime: $(DOCKER_NS)/fabric-nodeenv:latest + + # List of directories to treat as external builders and launchers for + # chaincode. The external builder detection processing will iterate over the + # builders in the order specified below. + externalBuilders: [] + + # Timeout duration for starting up a container and waiting for Register + # to come through. 1sec should be plenty for chaincode unit tests + startuptimeout: 300s + + # Timeout duration for Invoke and Init calls to prevent runaway. + # This timeout is used by all chaincodes in all the channels, including + # system chaincodes. + # Note that during Invoke, if the image is not available (e.g. being + # cleaned up when in development environment), the peer will automatically + # build the image, which might take more time. In production environment, + # the chaincode image is unlikely to be deleted, so the timeout could be + # reduced accordingly. + executetimeout: 30s + + # There are 2 modes: "dev" and "net". + # In dev mode, user runs the chaincode after starting peer from + # command line on local machine. + # In net mode, peer will run chaincode in a docker container. + mode: net + + # keepalive in seconds. In situations where the communiction goes through a + # proxy that does not support keep-alive, this parameter will maintain connection + # between peer and chaincode. + # A value <= 0 turns keepalive off + keepalive: 0 + + # system chaincodes whitelist. To add system chaincode "myscc" to the + # whitelist, add "myscc: enable" to the list below, and register in + # chaincode/importsysccs.go + system: + _lifecycle: enable + cscc: enable + lscc: enable + escc: enable + vscc: enable + qscc: enable + + # Logging section for the chaincode container + logging: + # Default level for all loggers within the chaincode container + level: info + # Override default level for the 'shim' logger + shim: warning + # Format for the chaincode container logs + format: '%{color}%{time:2006-01-02 15:04:05.000 MST} [%{module}] %{shortfunc} -> %{level:.4s} %{id:03x}%{color:reset} %{message}' + +############################################################################### +# +# Ledger section - ledger configuration encompasses both the blockchain +# and the state +# +############################################################################### +ledger: + + blockchain: + + state: + # stateDatabase - options are "goleveldb", "CouchDB" + # goleveldb - default state database stored in goleveldb. + # CouchDB - store state database in CouchDB + stateDatabase: goleveldb + # Limit on the number of records to return per query + totalQueryLimit: 100000 + couchDBConfig: + # It is recommended to run CouchDB on the same server as the peer, and + # not map the CouchDB container port to a server port in docker-compose. + # Otherwise proper security must be provided on the connection between + # CouchDB client (on the peer) and server. + couchDBAddress: 127.0.0.1:5984 + # This username must have read and write authority on CouchDB + username: + # The password is recommended to pass as an environment variable + # during start up (eg CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD). + # If it is stored here, the file must be access control protected + # to prevent unintended users from discovering the password. 
+ password: + # Number of retries for CouchDB errors + maxRetries: 3 + # Number of retries for CouchDB errors during peer startup + maxRetriesOnStartup: 12 + # CouchDB request timeout (unit: duration, e.g. 20s) + requestTimeout: 35s + # Limit on the number of records per each CouchDB query + # Note that chaincode queries are only bound by totalQueryLimit. + # Internally the chaincode may execute multiple CouchDB queries, + # each of size internalQueryLimit. + internalQueryLimit: 1000 + # Limit on the number of records per CouchDB bulk update batch + maxBatchUpdateSize: 1000 + # Warm indexes after every N blocks. + # This option warms any indexes that have been + # deployed to CouchDB after every N blocks. + # A value of 1 will warm indexes after every block commit, + # to ensure fast selector queries. + # Increasing the value may improve write efficiency of peer and CouchDB, + # but may degrade query response time. + warmIndexesAfterNBlocks: 1 + # Create the _global_changes system database + # This is optional. Creating the global changes database will require + # additional system resources to track changes and maintain the database + createGlobalChangesDB: false + + history: + # enableHistoryDatabase - options are true or false + # Indicates if the history of key updates should be stored. + # All history 'index' will be stored in goleveldb, regardless if using + # CouchDB or alternate database for the state. + enableHistoryDatabase: true + + pvtdataStore: + # the maximum db batch size for converting + # the ineligible missing data entries to eligible missing data entries + collElgProcMaxDbBatchSize: 5000 + # the minimum duration (in milliseconds) between writing + # two consecutive db batches for converting the ineligible missing data entries to eligible missing data entries + collElgProcDbBatchesInterval: 1000 + +############################################################################### +# +# Operations section +# +############################################################################### +operations: + # host and port for the operations server + listenAddress: 127.0.0.1:9443 + + # TLS configuration for the operations endpoint + tls: + # TLS enabled + enabled: false + + # path to PEM encoded server certificate for the operations server + cert: + file: "cert.pem" + + # path to PEM encoded server key for the operations server + key: + file: "key.pem" + + # most operations service endpoints require client authentication when TLS + # is enabled. clientAuthRequired requires client certificate authentication + # at the TLS layer to access all resources. 
+ clientAuthRequired: false + + # paths to PEM encoded ca certificates to trust for client authentication + clientRootCAs: + files: + - "rootcert.pem" + +############################################################################### +# +# Metrics section +# +############################################################################### +metrics: + # metrics provider is one of statsd, prometheus, or disabled + provider: prometheus + + # statsd configuration + statsd: + # network type: tcp or udp + network: udp + + # statsd server address + address: 127.0.0.1:8125 + + # the interval at which locally cached counters and gauges are pushed + # to statsd; timings are pushed immediately + writeInterval: 10s + + # prefix is prepended to all emitted statsd metrics + prefix: diff --git a/testdata/init/peer/core_invalid.yaml b/testdata/init/peer/core_invalid.yaml new file mode 100644 index 00000000..bfac3c57 --- /dev/null +++ b/testdata/init/peer/core_invalid.yaml @@ -0,0 +1,708 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +############################################################################### +# +# Peer section +# +############################################################################### +peer: + + # The peer id provides a name for this peer instance and is used when + # naming docker resources. + id: jdoe + + # The networkId allows for logical separation of networks and is used when + # naming docker resources. + networkId: dev + + # The Address at local network interface this Peer will listen on. + # By default, it will listen on all network interfaces + listenAddress: 0.0.0.0:7051 + + # The endpoint this peer uses to listen for inbound chaincode connections. + # If this is commented-out, the listen address is selected to be + # the peer's address (see below) with port 7052 + chaincodeListenAddress: 0.0.0.0:7052 + + # The endpoint the chaincode for this peer uses to connect to the peer. + # If this is not specified, the chaincodeListenAddress address is selected. + # And if chaincodeListenAddress is not specified, address is selected from + # peer listenAddress. + chaincodeAddress: 0.0.0.0:7053 + + # When used as peer config, this represents the endpoint to other peers + # in the same organization. For peers in other organization, see + # gossip.externalEndpoint for more info. + # When used as CLI config, this means the peer's endpoint to interact with + address: 0.0.0.0:7054 + + # Whether the Peer should programmatically determine its address + # This case is useful for docker containers. 
+ addressAutoDetect: true + +# Keepalive settings for peer server and clients +keepalive: + # Interval is the duration after which if the server does not see + # any activity from the client it pings the client to see if it's alive + interval: 7200s + # Timeout is the duration the server waits for a response + # from the client after sending a ping before closing the connection + timeout: 20s + # MinInterval is the minimum permitted time between client pings. + # If clients send pings more frequently, the peer server will + # disconnect them + minInterval: 60s + # Client keepalive settings for communicating with other peer nodes + client: + # Interval is the time between pings to peer nodes. This must + # greater than or equal to the minInterval specified by peer + # nodes + interval: 60s + # Timeout is the duration the client waits for a response from + # peer nodes before closing the connection + timeout: 20s + # DeliveryClient keepalive settings for communication with ordering + # nodes. + deliveryClient: + # Interval is the time between pings to ordering nodes. This must + # greater than or equal to the minInterval specified by ordering + # nodes. + interval: 60s + # Timeout is the duration the client waits for a response from + # ordering nodes before closing the connection + timeout: 20s + + # Gossip related configuration + gossip: + # Bootstrap set to initialize gossip with. + # This is a list of other peers that this peer reaches out to at startup. + # Important: The endpoints here have to be endpoints of peers in the same + # organization, because the peer would refuse connecting to these endpoints + # unless they are in the same organization as the peer. + bootstrap: + - 127.0.0.1:7051 + - 127.0.0.1:7052 + + # NOTE: orgLeader and useLeaderElection parameters are mutual exclusive. + # Setting both to true would result in the termination of the peer + # since this is undefined state. If the peers are configured with + # useLeaderElection=false, make sure there is at least 1 peer in the + # organization that its orgLeader is set to true. + + # Defines whenever peer will initialize dynamic algorithm for + # "leader" selection, where leader is the peer to establish + # connection with ordering service and use delivery protocol + # to pull ledger blocks from ordering service. It is recommended to + # use leader election for large networks of peers. + useLeaderElection: true + # Statically defines peer to be an organization "leader", + # where this means that current peer will maintain connection + # with ordering service and disseminate block across peers in + # its own organization + orgLeader: true + + # Interval for membershipTracker polling + membershipTrackerInterval: 5s + + # Overrides the endpoint that the peer publishes to peers + # in its organization. 
For peers in foreign organizations + # see 'externalEndpoint' + endpoint: "endpoint1" + # These need to be overridden with the FQDN of the peer + address: "address1" + externaladdress: "externaladdress1" + # Maximum count of blocks stored in memory + maxBlockCountToStore: 10 + # Max time between consecutive message pushes(unit: millisecond) + maxPropagationBurstLatency: 10ms + # Max number of messages stored until a push is triggered to remote peers + maxPropagationBurstSize: 10 + # Number of times a message is pushed to remote peers + propagateIterations: 1 + # Number of peers selected to push messages to + propagatePeerNum: 3 + # Determines frequency of pull phases(unit: second) + # Must be greater than digestWaitTime + responseWaitTime + pullInterval: 4s + # Number of peers to pull from + pullPeerNum: 3 + # Determines frequency of pulling state info messages from peers(unit: second) + requestStateInfoInterval: 4s + # Determines frequency of pushing state info messages to peers(unit: second) + publishStateInfoInterval: 4s + # Maximum time a stateInfo message is kept until expired + stateInfoRetentionInterval: 2s + # Time from startup certificates are included in Alive messages(unit: second) + publishCertPeriod: 10s + # Should we skip verifying block messages or not (currently not in use) + skipBlockVerification: true + # Dial timeout(unit: second) + dialTimeout: 3s + # Connection timeout(unit: second) + connTimeout: 2s + # Buffer size of received messages + recvBuffSize: 20 + # Buffer size of sending messages + sendBuffSize: 200 + # Time to wait before pull engine processes incoming digests (unit: second) + # Should be slightly smaller than requestWaitTime + digestWaitTime: 1s + # Time to wait before pull engine removes incoming nonce (unit: milliseconds) + # Should be slightly bigger than digestWaitTime + requestWaitTime: 1500ms + # Time to wait before pull engine ends pull (unit: second) + responseWaitTime: 2s + # Alive check interval(unit: second) + aliveTimeInterval: 5s + # Alive expiration timeout(unit: second) + aliveExpirationTimeout: 25s + # Reconnect interval(unit: second) + reconnectInterval: 25s + # This is an endpoint that is published to peers outside of the organization. + # If this isn't set, the peer will not be known to other organizations. + externalEndpoint: "externalEndpoint1" + # Leader election service configuration + election: + # Longest time peer waits for stable membership during leader election startup (unit: second) + startupGracePeriod: 15s + # Interval gossip membership samples to check its stability (unit: second) + membershipSampleInterval: 1s + # Time passes since last declaration message before peer decides to perform leader election (unit: second) + leaderAliveThreshold: 10s + # Time between peer sends propose message and declares itself as a leader (sends declaration message) (unit: second) + leaderElectionDuration: 5s + + pvtData: + # pullRetryThreshold determines the maximum duration of time private data corresponding for a given block + # would be attempted to be pulled from peers until the block would be committed without the private data + pullRetryThreshold: 60s + # As private data enters the transient store, it is associated with the peer's ledger's height at that time. + # transientstoreMaxBlockRetention defines the maximum difference between the current ledger's height upon commit, + # and the private data residing inside the transient store that is guaranteed not to be purged. 
+ # Private data is purged from the transient store when blocks with sequences that are multiples + # of transientstoreMaxBlockRetention are committed. + transientstoreMaxBlockRetention: 1000 + # pushAckTimeout is the maximum time to wait for an acknowledgement from each peer + # at private data push at endorsement time. + pushAckTimeout: 3s + # Block to live pulling margin, used as a buffer + # to prevent peer from trying to pull private data + # from peers that is soon to be purged in next N blocks. + # This helps a newly joined peer catch up to current + # blockchain height quicker. + btlPullMargin: 10 + # the process of reconciliation is done in an endless loop, while in each iteration reconciler tries to + # pull from the other peers the most recent missing blocks with a maximum batch size limitation. + # reconcileBatchSize determines the maximum batch size of missing private data that will be reconciled in a + # single iteration. + reconcileBatchSize: 10 + # reconcileSleepInterval determines the time reconciler sleeps from end of an iteration until the beginning + # of the next reconciliation iteration. + reconcileSleepInterval: 1m + # reconciliationEnabled is a flag that indicates whether private data reconciliation is enable or not. + reconciliationEnabled: true + # skipPullingInvalidTransactionsDuringCommit is a flag that indicates whether pulling of invalid + # transaction's private data from other peers need to be skipped during the commit time and pulled + # only through reconciler. + skipPullingInvalidTransactionsDuringCommit: false + + # Gossip state transfer related configuration + state: + # indicates whenever state transfer is enabled or not + # default value is true, i.e. state transfer is active + # and takes care to sync up missing blocks allowing + # lagging peer to catch up to speed with rest network + enabled: true + # checkInterval interval to check whether peer is lagging behind enough to + # request blocks via state transfer from another peer. + checkInterval: 10s + # responseTimeout amount of time to wait for state transfer response from + # other peers + responseTimeout: 3s + # batchSize the number of blocks to request via state transfer from another peer + batchSize: 10 + # blockBufferSize reflects the size of the re-ordering buffer + # which captures blocks and takes care to deliver them in order + # down to the ledger layer. The actually buffer size is bounded between + # 0 and 2*blockBufferSize, each channel maintains its own buffer + blockBufferSize: 20 + # maxRetries maximum number of re-tries to ask + # for single state transfer request + maxRetries: 3 + + # TLS Settings + tls: + # Require server-side TLS + enabled: false + # Require client certificates / mutual TLS. + # Note that clients that are not configured to use a certificate will + # fail to connect to the peer. + clientAuthRequired: false + # X.509 certificate used for TLS server + cert: + file: tls/server.crt + # Private key used for TLS server (and client if clientAuthEnabled + # is set to true + key: + file: tls/server.key + # Trusted root certificate chain for tls.cert + rootcert: + file: tls/ca.crt + # Set of root certificate authorities used to verify client certificates + clientRootCAs: + files: + - tls/ca.crt + # Private key used for TLS when making client connections. If + # not set, peer.tls.key.file will be used instead + clientKey: + file: + # X.509 certificate used for TLS when making client connections. 
+ # If not set, peer.tls.cert.file will be used instead + clientCert: + file: + + # Authentication contains configuration parameters related to authenticating + # client messages + authentication: + # the acceptable difference between the current server time and the + # client's time as specified in a client request message + timewindow: 15m + + # Path on the file system where peer will store data (eg ledger). This + # location must be access control protected to prevent unintended + # modification that might corrupt the peer operations. + fileSystemPath: /var/hyperledger/production + + # BCCSP (Blockchain crypto provider): Select which crypto implementation or + # library to use + BCCSP: + Default: SW + # Settings for the SW crypto provider (i.e. when DEFAULT: SW) + SW: + # TODO: The default Hash and Security level needs refactoring to be + # fully configurable. Changing these defaults requires coordination + # SHA2 is hardcoded in several places, not only BCCSP + Hash: SHA2 + Security: 256 + # Location of Key Store + FileKeyStore: + # If "", defaults to 'mspConfigPath'/keystore + KeyStore: "keystore1" + # Settings for the PKCS#11 crypto provider (i.e. when DEFAULT: PKCS11) + PKCS11: + # Location of the PKCS11 module library + Library: "library1" + # Token Label + Label: "label1" + # User PIN + Pin: "1234" + Hash: SHA2 + Security: 256 + FileKeyStore: + KeyStore: "keystore2" + + # Path on the file system where peer will find MSP local configurations + mspConfigPath: msp + + # Identifier of the local MSP + # ----!!!!IMPORTANT!!!-!!!IMPORTANT!!!-!!!IMPORTANT!!!!---- + # Deployers need to change the value of the localMspId string. + # In particular, the name of the local MSP ID of a peer needs + # to match the name of one of the MSPs in each of the channel + # that this peer is a member of. Otherwise this peer's messages + # will not be identified as valid by other nodes. + localMspId: SampleOrg + + # CLI common client config options + client: + # connection timeout + connTimeout: 3s + + # Delivery service related config + deliveryclient: + # It sets the total time the delivery service may spend in reconnection + # attempts until its retry logic gives up and returns an error + reconnectTotalTimeThreshold: 3600s + + # It sets the delivery service <-> ordering service node connection timeout + connTimeout: 3s + + # It sets the delivery service maximal delay between consecutive retries + reConnectBackoffThreshold: 3600s + + # Type for the local MSP - by default it's of type bccsp + localMspType: bccsp + + # Used with Go profiling tools only in none production environment. In + # production, it should be disabled (eg enabled: false) + profile: + enabled: false + listenAddress: 0.0.0.0:6060 + + # Handlers defines custom handlers that can filter and mutate + # objects passing within the peer, such as: + # Auth filter - reject or forward proposals from clients + # Decorators - append or mutate the chaincode input passed to the chaincode + # Endorsers - Custom signing over proposal response payload and its mutation + # Valid handler definition contains: + # - A name which is a factory method name defined in + # core/handlers/library/library.go for statically compiled handlers + # - library path to shared object binary for pluggable filters + # Auth filters and decorators are chained and executed in the order that + # they are defined. 
For example: + # authFilters: + # - + # name: FilterOne + # library: /opt/lib/filter.so + # - + # name: FilterTwo + # decorators: + # - + # name: DecoratorOne + # - + # name: DecoratorTwo + # library: /opt/lib/decorator.so + # Endorsers are configured as a map that its keys are the endorsement system chaincodes that are being overridden. + # Below is an example that overrides the default ESCC and uses an endorsement plugin that has the same functionality + # as the default ESCC. + # If the 'library' property is missing, the name is used as the constructor method in the builtin library similar + # to auth filters and decorators. + # endorsers: + # escc: + # name: DefaultESCC + # library: /etc/hyperledger/fabric/plugin/escc.so + handlers: + authFilters: + - + name: DefaultAuth + - + name: ExpirationCheck # This filter checks identity x509 certificate expiration + decorators: + - + name: DefaultDecorator + endorsers: + escc: + name: DefaultEndorsement + library: + validators: + vscc: + name: DefaultValidation + library: + + # library: /etc/hyperledger/fabric/plugin/escc.so + # Number of goroutines that will execute transaction validation in parallel. + # By default, the peer chooses the number of CPUs on the machine. Set this + # variable to override that choice. + # NOTE: overriding this value might negatively influence the performance of + # the peer so please change this value only if you know what you're doing + validatorPoolSize: 5 + + # The discovery service is used by clients to query information about peers, + # such as - which peers have joined a certain channel, what is the latest + # channel config, and most importantly - given a chaincode and a channel, + # what possible sets of peers satisfy the endorsement policy. + discovery: + enabled: true + # Whether the authentication cache is enabled or not. + authCacheEnabled: true + # The maximum size of the cache, after which a purge takes place + authCacheMaxSize: 1000 + # The proportion (0 to 1) of entries that remain in the cache after the cache is purged due to overpopulation + authCachePurgeRetentionRatio: 0.75 + # Whether to allow non-admins to perform non channel scoped queries. + # When this is false, it means that only peer admins can perform non channel scoped queries. + orgMembersAllowedAccess: true + + # Limits is used to configure some internal resource limits. + limits: + # Concurrency limits the number of concurrently running system chaincode requests. + # This option is only supported for qscc at this time. + concurrency: + qscc: 5000 + +############################################################################### +# +# VM section +# +############################################################################### +vm: + + # Endpoint of the vm management system. For docker can be one of the following in general + # unix:///var/run/docker.sock + # http://localhost:2375 + # https://localhost:2376 + endpoint: unix:///var/run/docker.sock + + # settings for docker vms + docker: + tls: + enabled: false + ca: + file: docker/ca.crt + cert: + file: docker/tls.crt + key: + file: docker/tls.key + + # Enables/disables the standard out/err from chaincode containers for + # debugging purposes + attachStdout: false + + # Parameters on creating docker container. + # Container may be efficiently created using ipam & dns-server for cluster + # NetworkMode - sets the networking mode for the container. Supported + # standard values are: `host`(default),`bridge`,`ipvlan`,`none`. + # Dns - a list of DNS servers for the container to use. 
+ # Note: `Privileged` `Binds` `Links` and `PortBindings` properties of + # Docker Host Config are not supported and will not be used if set. + # LogConfig - sets the logging driver (Type) and related options + # (Config) for Docker. For more info, + # https://docs.docker.com/engine/admin/logging/overview/ + # Note: Set LogConfig using Environment Variables is not supported. + hostConfig: + NetworkMode: host + Dns: + # - 192.168.0.1 + # NEVER UNCOMMENT THIS + # LogConfig: + # Type: json-file + # Config: + # max-size: "50m" + # max-file: "5" + Memory: 2147483648 + +############################################################################### +# +# Chaincode section +# +############################################################################### +chaincode: + + # The id is used by the Chaincode stub to register the executing Chaincode + # ID with the Peer and is generally supplied through ENV variables + # the `path` form of ID is provided when installing the chaincode. + # The `name` is used for all other requests and can be any string. + id: + path: + name: + + # Generic builder environment, suitable for most chaincode types + builder: $(DOCKER_NS)/fabric-ccenv:$(PROJECT_VERSION) + + # Enables/disables force pulling of the base docker images (listed below) + # during user chaincode instantiation. + # Useful when using moving image tags (such as :latest) + pull: false + + golang: + # golang will never need more than baseos + runtime: $(DOCKER_NS)/fabric-baseos:$(PROJECT_VERSION) + + # whether or not golang chaincode should be linked dynamically + dynamicLink: false + + java: + # This is an image based on java:openjdk-8 with addition compiler + # tools added for java shim layer packaging. + # This image is packed with shim layer libraries that are necessary + # for Java chaincode runtime. + runtime: $(DOCKER_NS)/fabric-javaenv:latest + + node: + # This is an image based on node:$(NODE_VER)-alpine + runtime: $(DOCKER_NS)/fabric-nodeenv:latest + + # List of directories to treat as external builders and launchers for + # chaincode. The external builder detection processing will iterate over the + # builders in the order specified below. + externalBuilders: [] + + # Timeout duration for starting up a container and waiting for Register + # to come through. 1sec should be plenty for chaincode unit tests + startuptimeout: 300s + + # Timeout duration for Invoke and Init calls to prevent runaway. + # This timeout is used by all chaincodes in all the channels, including + # system chaincodes. + # Note that during Invoke, if the image is not available (e.g. being + # cleaned up when in development environment), the peer will automatically + # build the image, which might take more time. In production environment, + # the chaincode image is unlikely to be deleted, so the timeout could be + # reduced accordingly. + executetimeout: 30s + + # There are 2 modes: "dev" and "net". + # In dev mode, user runs the chaincode after starting peer from + # command line on local machine. + # In net mode, peer will run chaincode in a docker container. + mode: net + + # keepalive in seconds. In situations where the communiction goes through a + # proxy that does not support keep-alive, this parameter will maintain connection + # between peer and chaincode. + # A value <= 0 turns keepalive off + keepalive: 0 + + # system chaincodes whitelist. 
To add system chaincode "myscc" to the + # whitelist, add "myscc: enable" to the list below, and register in + # chaincode/importsysccs.go + system: + _lifecycle: enable + cscc: enable + lscc: enable + escc: enable + vscc: enable + qscc: enable + + # Logging section for the chaincode container + logging: + # Default level for all loggers within the chaincode container + level: info + # Override default level for the 'shim' logger + shim: warning + # Format for the chaincode container logs + format: '%{color}%{time:2006-01-02 15:04:05.000 MST} [%{module}] %{shortfunc} -> %{level:.4s} %{id:03x}%{color:reset} %{message}' + +############################################################################### +# +# Ledger section - ledger configuration encompasses both the blockchain +# and the state +# +############################################################################### +ledger: + + blockchain: + + state: + # stateDatabase - options are "goleveldb", "CouchDB" + # goleveldb - default state database stored in goleveldb. + # CouchDB - store state database in CouchDB + stateDatabase: goleveldb + # Limit on the number of records to return per query + totalQueryLimit: 100000 + couchDBConfig: + # It is recommended to run CouchDB on the same server as the peer, and + # not map the CouchDB container port to a server port in docker-compose. + # Otherwise proper security must be provided on the connection between + # CouchDB client (on the peer) and server. + couchDBAddress: 127.0.0.1:5984 + # This username must have read and write authority on CouchDB + username: + # The password is recommended to pass as an environment variable + # during start up (eg CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD). + # If it is stored here, the file must be access control protected + # to prevent unintended users from discovering the password. + password: + # Number of retries for CouchDB errors + maxRetries: 3 + # Number of retries for CouchDB errors during peer startup + maxRetriesOnStartup: 12 + # CouchDB request timeout (unit: duration, e.g. 20s) + requestTimeout: 35s + # Limit on the number of records per each CouchDB query + # Note that chaincode queries are only bound by totalQueryLimit. + # Internally the chaincode may execute multiple CouchDB queries, + # each of size internalQueryLimit. + internalQueryLimit: 1000 + # Limit on the number of records per CouchDB bulk update batch + maxBatchUpdateSize: 1000 + # Warm indexes after every N blocks. + # This option warms any indexes that have been + # deployed to CouchDB after every N blocks. + # A value of 1 will warm indexes after every block commit, + # to ensure fast selector queries. + # Increasing the value may improve write efficiency of peer and CouchDB, + # but may degrade query response time. + warmIndexesAfterNBlocks: 1 + # Create the _global_changes system database + # This is optional. Creating the global changes database will require + # additional system resources to track changes and maintain the database + createGlobalChangesDB: false + + history: + # enableHistoryDatabase - options are true or false + # Indicates if the history of key updates should be stored. + # All history 'index' will be stored in goleveldb, regardless if using + # CouchDB or alternate database for the state. 
+ enableHistoryDatabase: true + + pvtdataStore: + # the maximum db batch size for converting + # the ineligible missing data entries to eligible missing data entries + collElgProcMaxDbBatchSize: 5000 + # the minimum duration (in milliseconds) between writing + # two consecutive db batches for converting the ineligible missing data entries to eligible missing data entries + collElgProcDbBatchesInterval: 1000 + +############################################################################### +# +# Operations section +# +############################################################################### +operations: + # host and port for the operations server + listenAddress: 127.0.0.1:9443 + + # TLS configuration for the operations endpoint + tls: + # TLS enabled + enabled: false + + # path to PEM encoded server certificate for the operations server + cert: + file: "cert.pem" + + # path to PEM encoded server key for the operations server + key: + file: "key.pem" + + # most operations service endpoints require client authentication when TLS + # is enabled. clientAuthRequired requires client certificate authentication + # at the TLS layer to access all resources. + clientAuthRequired: false + + # paths to PEM encoded ca certificates to trust for client authentication + clientRootCAs: + files: + - "rootcert.pem" + +############################################################################### +# +# Metrics section +# +############################################################################### +metrics: + # metrics provider is one of statsd, prometheus, or disabled + provider: prometheus + + # statsd configuration + statsd: + # network type: tcp or udp + network: udp + + # statsd server address + address: 127.0.0.1:8125 + + # the interval at which locally cached counters and gauges are pushed + # to statsd; timings are pushed immediately + writeInterval: 10s + + # prefix is prepended to all emitted statsd metrics + prefix: diff --git a/testdata/init/peer/tls-cert.pem b/testdata/init/peer/tls-cert.pem new file mode 100644 index 00000000..2d59d811 --- /dev/null +++ b/testdata/init/peer/tls-cert.pem @@ -0,0 +1,16 @@ +-----BEGIN CERTIFICATE----- +MIICbzCCAhagAwIBAgIUPMLMFwrc0eEvfXVWqD7JBVskuT8wCgYIKoZIzj0EAwIw +aDELMAkGA1UEBhMCVVMxFzAVBgNVBAgTDk5vcnRoIENhcm9saW5hMRQwEgYDVQQK +EwtIeXBlcmxlZGdlcjEPMA0GA1UECxMGRmFicmljMRkwFwYDVQQDExBmYWJyaWMt +Y2Etc2VydmVyMB4XDTIwMTAwODE3MzQwMFoXDTI1MTAwNzE3MzQwMFowbzELMAkG +A1UEBhMCVVMxFzAVBgNVBAgTDk5vcnRoIENhcm9saW5hMRQwEgYDVQQKEwtIeXBl +cmxlZGdlcjEPMA0GA1UECxMGRmFicmljMSAwHgYDVQQDExdTYWFkcy1NYWNCb29r +LVByby5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABKpwW13lchAmpnVU +mfWR/SatyoxRbJY/Vmd47FVmTTQzP6ozas9kw7YdU8puuSBRZUysjZKog6ZIhP1i +prktViGjgZYwgZMwDgYDVR0PAQH/BAQDAgOoMB0GA1UdJQQWMBQGCCsGAQUFBwMB +BggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0GA1UdDgQWBBQARVNUQSGBTBonhSkx +H3U+umbX9jAfBgNVHSMEGDAWgBRZGUFKO6Oj/cWcooPUq3ZunPTyjjAUBgNVHREE +DTALgglsb2NhbGhvc3QwCgYIKoZIzj0EAwIDRwAwRAIga1fONwubqaeUiO4gaV6H +WuAoSCXZScS5cdXJ5YBDGgcCIF5COASszFIlBAI2uVymhuaZyrTRHTFGS2y8pO1g +HnU6 +-----END CERTIFICATE----- diff --git a/testdata/init/peer/tls-key.pem b/testdata/init/peer/tls-key.pem new file mode 100644 index 00000000..0563be7b --- /dev/null +++ b/testdata/init/peer/tls-key.pem @@ -0,0 +1,5 @@ +-----BEGIN PRIVATE KEY----- +MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQg+nv3DrdFu5NegcRO +BB3LlTBGyPdYsP5yR7W4rVWpSP6hRANCAASqcFtd5XIQJqZ1VJn1kf0mrcqMUWyW +P1ZneOxVZk00Mz+qM2rPZMO2HVPKbrkgUWVMrI2SqIOmSIT9Yqa5LVYh +-----END PRIVATE KEY----- diff --git a/testdata/migration/secret.json 
b/testdata/migration/secret.json new file mode 100644 index 00000000..cdd3e0e4 --- /dev/null +++ b/testdata/migration/secret.json @@ -0,0 +1,30 @@ +{ + "tls":{ + "keystore":[ + "LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JR0hBZ0VBTUJNR0J5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEJHMHdhd0lCQVFRZ2xzVkU3Ym5zMEFHeDRndmgKUHZNdVVNVEpmRVFFMm8rcFJvV3l6SE5mQVphaFJBTkNBQVNsZUNRdFlZT3lEWVZGenB3d2p0UERIOG5XRGNZOApqQkx2NTJLdnkwWm5BeG9ORmMxSC9Ic1dBcEF0anpYeTNDYm9vNm5kOUVmYXlMSUk5QVFTRXFoOQotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg==" + ], + "signcerts":[ + "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNsVENDQWp5Z0F3SUJBZ0lVTFBhT2FHMDlaVmlIRGVsNW9YbDl2YUdpSjJZd0NnWUlLb1pJemowRUF3SXcKWHpFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJBd0RnWURWUVFERXdkallURXRkR3h6Ck1CNFhEVEU1TVRFeE9URTNOVGN3TUZvWERUSTVNVEV4TmpFNE1ESXdNRm93WFRFTE1Ba0dBMVVFQmhNQ1ZWTXgKRnpBVkJnTlZCQWdURGs1dmNuUm9JRU5oY205c2FXNWhNUlF3RWdZRFZRUUtFd3RJZVhCbGNteGxaR2RsY2pFUApNQTBHQTFVRUN4TUdZMnhwWlc1ME1RNHdEQVlEVlFRREV3VmhaRzFwYmpCWk1CTUdCeXFHU000OUFnRUdDQ3FHClNNNDlBd0VIQTBJQUJLVjRKQzFoZzdJTmhVWE9uRENPMDhNZnlkWU54anlNRXUvbllxL0xSbWNER2cwVnpVZjgKZXhZQ2tDMlBOZkxjSnVpanFkMzBSOXJJc2dqMEJCSVNxSDJqZ2Rjd2dkUXdEZ1lEVlIwUEFRSC9CQVFEQWdPbwpNQjBHQTFVZEpRUVdNQlFHQ0NzR0FRVUZCd01CQmdnckJnRUZCUWNEQWpBTUJnTlZIUk1CQWY4RUFqQUFNQjBHCkExVWREZ1FXQkJRbk1BeHR4REpFSWViMExycllSL1FwK1hYUlNUQWZCZ05WSFNNRUdEQVdnQlI5VW83V3AyenIKdm91UGdBVUhUSk9xMnhXM25UQlZCZ05WSFJFRVRqQk1na1J1WVRrelpqQm1MWEJsWlhJeExtbGljSFl5TFhSbApjM1F0WTJ4MWMzUmxjaTUxY3kxemIzVjBhQzVqYjI1MFlXbHVaWEp6TG1Gd2NHUnZiV0ZwYmk1amJHOTFaSWNFCmZ3QUFBVEFLQmdncWhrak9QUVFEQWdOSEFEQkVBaUFaYkFGS3FYWmUybWI2cTNrYkR1aVE4NmZYOW9OSzBCTmoKUEVUZklIaS9zUUlnSDczdE5TVmFJb2lPZFhYSEhJOFkwOXVFN2tPWDJIL3hXRk5LSTgvVkxBdz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=" + ], + "cacerts":[ + "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNGakNDQWJ5Z0F3SUJBZ0lVQzA0VnRXVGp4TU1FSzJxcmtzVFpneVZ5WU8wd0NnWUlLb1pJemowRUF3SXcKWHpFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJBd0RnWURWUVFERXdkallURXRkR3h6Ck1CNFhEVEU1TVRFeE9URTNNamN3TUZvWERUTTBNVEV4TlRFM01qY3dNRm93WHpFTE1Ba0dBMVVFQmhNQ1ZWTXgKRnpBVkJnTlZCQWdURGs1dmNuUm9JRU5oY205c2FXNWhNUlF3RWdZRFZRUUtFd3RJZVhCbGNteGxaR2RsY2pFUApNQTBHQTFVRUN4TUdSbUZpY21sak1SQXdEZ1lEVlFRREV3ZGpZVEV0ZEd4ek1Ga3dFd1lIS29aSXpqMENBUVlJCktvWkl6ajBEQVFjRFFnQUVCejNoaElsN0NWeEVwV21RaEppSFBleEZZTVk1a0M2emVERVR4N3k3MFd1NksrOVcKNFlrZ0Rwd1VOSnFlY0t2NlVycThPZEVSNk1malZRYUJ0bHNaMmFOV01GUXdEZ1lEVlIwUEFRSC9CQVFEQWdFRwpNQklHQTFVZEV3RUIvd1FJTUFZQkFmOENBUUV3SFFZRFZSME9CQllFRkgxU2p0YW5iT3UraTQrQUJRZE1rNnJiCkZiZWRNQThHQTFVZEVRUUlNQWFIQkFwZVV6QXdDZ1lJS29aSXpqMEVBd0lEU0FBd1JRSWhBT2xUYmtjMXlWeisKQ3U0UUd6NnZKYlltVEkrRUljNWVWcmkybVUyYjVSUzRBaUFnNkJhZDNPazRWcXAyV3hVenR0RnJxTHhWalJzbwpBVXBhWDJWTHMvSTBoZz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K" + ] + }, + "component":{ + "keystore":[ + "LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JR0hBZ0VBTUJNR0J5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEJHMHdhd0lCQVFRZ28xaG1COUZ6VGltL1VYNTUKOWEyRVI3ZHRBYTRnbTF0Tm9oZ29rR0ZLeE1haFJBTkNBQVRTZEhOQkJhYU1Ydk5iTEtndmd1U0hMOXdCRWZTRApkU3l6Y2lPSnlNSDU2S09ZMUZwbURjZE5uaHl1UTc5ZzVDMHkxOVlWRVd5cVU1M2tzZUJMZXRyOQotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg==" + ], + "operationscerts":[ + 
"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNGakNDQWIyZ0F3SUJBZ0lVTHdZYnJaU0xlTTQ2UmpuaWgyWHFoclhMQkMwd0NnWUlLb1pJemowRUF3SXcKYURFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJrd0Z3WURWUVFERXhCbVlXSnlhV010ClkyRXRjMlZ5ZG1WeU1CNFhEVEU1TURVeU16RTROREl3TUZvWERUTTBNRFV4T1RFNE5ESXdNRm93YURFTE1Ba0cKQTFVRUJoTUNWVk14RnpBVkJnTlZCQWdURGs1dmNuUm9JRU5oY205c2FXNWhNUlF3RWdZRFZRUUtFd3RJZVhCbApjbXhsWkdkbGNqRVBNQTBHQTFVRUN4TUdSbUZpY21sak1Sa3dGd1lEVlFRREV4Qm1ZV0p5YVdNdFkyRXRjMlZ5CmRtVnlNRmt3RXdZSEtvWkl6ajBDQVFZSUtvWkl6ajBEQVFjRFFnQUVqS3pPWmxGdHlWUEJwQ0o2WVdmd0k1VzAKdkFZN28vZU9Nb1RVOVk5WnFpSDdndnNzQnFrUTBRSjkwaUx6UG93WEFXcG1ZRGlQNjdYQXo2cno2USthR3FORgpNRU13RGdZRFZSMFBBUUgvQkFRREFnRUdNQklHQTFVZEV3RUIvd1FJTUFZQkFmOENBUUV3SFFZRFZSME9CQllFCkZMSTRxZHBRbHFDR2FSOGRyWkN0eUUxR3haTUZNQW9HQ0NxR1NNNDlCQU1DQTBjQU1FUUNJQTlnbU44SGNyVDQKbDEwckI5V3cveEYxZFhIVWg1b2FqQzFwUjY5WmNES0pBaUJqZEtmWXhxNmR5dTJSZ0JkVGwyemg2Wi9JZlB1RQpraGZ2ck1kd1dmUWdUZz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0=" + ], + "admincerts":[ + "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUI2akNDQVpHZ0F3SUJBZ0lVWm9sSVlGMndNSnhlWG9OSkN2bytjN3NxcWY0d0NnWUlLb1pJemowRUF3SXcKYURFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJrd0Z3WURWUVFERXhCbVlXSnlhV010ClkyRXRjMlZ5ZG1WeU1CNFhEVEU1TVRFeE9URTNOVFF3TUZvWERUSXdNVEV4T0RFM05Ua3dNRm93SVRFUE1BMEcKQTFVRUN4TUdZMnhwWlc1ME1RNHdEQVlEVlFRREV3VmhaRzFwYmpCWk1CTUdCeXFHU000OUFnRUdDQ3FHU000OQpBd0VIQTBJQUJOdVVuaHJjZE1XNDlTeklOVFpEKzdleUJsU3ViQmd6L25GdkZNeDRzUFZNQVZIcUVWeS84VW9oCkk0WFgyZ3FaUm1PU3BHVGdQTGJvemZ5YUgwRlNaRWFqWURCZU1BNEdBMVVkRHdFQi93UUVBd0lIZ0RBTUJnTlYKSFJNQkFmOEVBakFBTUIwR0ExVWREZ1FXQkJTTXFhUTV6bWpFbXNRdjZTSWpKaEYrYlFhUjVUQWZCZ05WSFNNRQpHREFXZ0JRRkxkay85ZG9JcEtpc2gxRmFZWUZic3dQTENEQUtCZ2dxaGtqT1BRUURBZ05IQURCRUFpQUwreHI3CkVLbm1nWlQzUWl3Z2hGaG91VlIzT3laQTNIUUM2Tk9ZREo1bzFBSWdGa1c4VHVGdkFFaysrcEpVY1pkUkl1SUgKWUF1cjJyUFcyM2tnV2RUYndvRT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=" + ], + "signcerts":[ + "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNNekNDQWRxZ0F3SUJBZ0lVV2hEL28yVnErKzdFSEpqOG9TaUQvV2ZQb3lFd0NnWUlLb1pJemowRUF3SXcKYURFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJrd0Z3WURWUVFERXhCbVlXSnlhV010ClkyRXRjMlZ5ZG1WeU1CNFhEVEU1TVRFeE9URTNOVGN3TUZvWERUSXdNVEV4T0RFNE1ESXdNRm93WFRFTE1Ba0cKQTFVRUJoTUNWVk14RnpBVkJnTlZCQWdURGs1dmNuUm9JRU5oY205c2FXNWhNUlF3RWdZRFZRUUtFd3RJZVhCbApjbXhsWkdkbGNqRVBNQTBHQTFVRUN4TUdZMnhwWlc1ME1RNHdEQVlEVlFRREV3VmhaRzFwYmpCWk1CTUdCeXFHClNNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJOSjBjMEVGcG94ZTgxc3NxQytDNUljdjNBRVI5SU4xTExOeUk0bkkKd2Zub281alVXbVlOeDAyZUhLNUR2MkRrTFRMWDFoVVJiS3BUbmVTeDRFdDYydjJqYlRCck1BNEdBMVVkRHdFQgovd1FFQXdJSGdEQU1CZ05WSFJNQkFmOEVBakFBTUIwR0ExVWREZ1FXQkJTcGF5L0E4OGVLVXVSQ09tSUNnQzk1CmZ6azNIekFmQmdOVkhTTUVHREFXZ0JRRkxkay85ZG9JcEtpc2gxRmFZWUZic3dQTENEQUxCZ05WSFJFRUJEQUMKZ2dBd0NnWUlLb1pJemowRUF3SURSd0F3UkFJZ2VXM1VzRXlQMkJNL0c0TzZ4TXRxVTlVUlVKcVBWcGRGeHhzVwpZY2h6S3NJQ0lDV0FjTXpTUkFTL2FrY0dadGFoQnhScHp4eHJpdVRqelhZZ1A5SnpnTk5GCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K" + ], + "cacerts":[ + 
"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNGekNDQWIyZ0F3SUJBZ0lVSHByNzJ4OFpVSFcvWmEzMzZVeit4dXdPMU93d0NnWUlLb1pJemowRUF3SXcKYURFTE1Ba0dBMVVFQmhNQ1ZWTXhGekFWQmdOVkJBZ1REazV2Y25Sb0lFTmhjbTlzYVc1aE1SUXdFZ1lEVlFRSwpFd3RJZVhCbGNteGxaR2RsY2pFUE1BMEdBMVVFQ3hNR1JtRmljbWxqTVJrd0Z3WURWUVFERXhCbVlXSnlhV010ClkyRXRjMlZ5ZG1WeU1CNFhEVEU1TVRFeE9URTNNamN3TUZvWERUTTBNVEV4TlRFM01qY3dNRm93YURFTE1Ba0cKQTFVRUJoTUNWVk14RnpBVkJnTlZCQWdURGs1dmNuUm9JRU5oY205c2FXNWhNUlF3RWdZRFZRUUtFd3RJZVhCbApjbXhsWkdkbGNqRVBNQTBHQTFVRUN4TUdSbUZpY21sak1Sa3dGd1lEVlFRREV4Qm1ZV0p5YVdNdFkyRXRjMlZ5CmRtVnlNRmt3RXdZSEtvWkl6ajBDQVFZSUtvWkl6ajBEQVFjRFFnQUVvNWU1NHhEWEMrTVl2TDY2aWFQMWdCbmMKVjJXdTdOOE95a0k3TkpxUzU4V29KRzlUWVYvRmEvV1E2a2lCaStnSk94cDJBYkxqYkVpbXFGVTZNOGFYRzZORgpNRU13RGdZRFZSMFBBUUgvQkFRREFnRUdNQklHQTFVZEV3RUIvd1FJTUFZQkFmOENBUUV3SFFZRFZSME9CQllFCkZBVXQyVC8xMmdpa3FLeUhVVnBoZ1Z1ekE4c0lNQW9HQ0NxR1NNNDlCQU1DQTBnQU1FVUNJUUNkTExTcFN1UHAKZkxOSC9kRnk5WWJxalN6QkVvd3JaUEJwdGRZeXd1TkdZQUlnWHdaMitaaFVZTDFtOWFNU3drdzVKb2hSejBqLwpEQVVKRnN2SEdvbFlRTXc9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K" + ] + } +} diff --git a/testdata/msp/keystore/key.pem b/testdata/msp/keystore/key.pem new file mode 100644 index 00000000..4a7f4c30 --- /dev/null +++ b/testdata/msp/keystore/key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEwAIBADANBgkqhkiG9w0BAQEFAASCBKowggSmAgEAAoIBAQDfTqo1Y4bE5K70 +Svi0FQlMhLJepHa7vSAicBbxPsBlgsmw8onsJ8hUmOCcuc1pKZyh5k1PKP65S0JT +1rqIfus+VPryIIvszMdA527HWpz33jeFZEnX8MB4t71Ot1147vFKPPNh51QepDJ8 +ZWGvbuutQqUdS/6H0KRukyohL4u+hev98x4lNqKXAPY50vntEYJXBJ0xtWy33d3u +/TE+IHsSuDffuL+b0FSzE9Zx+adqULWBJNoxYJWDb7iV6z9stnyVHYwZRie4rEcU +gjTO9guwVuag4GHx1iuhQXNZCLdY3kPZT1eN3NotbLoBVqPcqqdT16C/JChGi0iH +yTI3FqO7AgMBAAECggEBAKqQoX3KGYSG0AOUyEZ00vd5W+ziXjakDMmBKGT4c2iV +74ySlTqmYGQNqXLMkNNEj3kyOda/D2Fk1LLJDw13NCQWMN+EcJxdBczTeVAYLFE/ +kNT4d1bTc5BBd/2KHYFjsTXtPFGJqj0FWfKeVXD0nltmzkuEYlRGz3IOQl26vVi1 +8Ng+YE5FFiJzadHjILajHy80MqaH5QJbcdTDJ0YiwT20xeT1Y9rZLU9B3E3aXZTg +a4hf+oeSLr3e1Y8ludFL6IYMI1LsJO9xefE40a6rfLF+D/10+Dkrqpw5K4k934bT +ZvIZfh/OWLXwGGRhjsXUiZDBWd4KcaAg83MKREed/iECgYEA9d1eC2WLqpZO+nef +8w9eAGJrzmF9ZBdGuOUOJBhx1SIDD80wuaEPD9LFpVqzm+pDFWX6tkxUMG8pa0D7 +XU/SrmkIV535i5FnevIwJtrFRy2KZPvdm8WkuP6AoBwnvKhApu6jBYKIWTXz5KwC +03qRHc38v6icT0GdOVJsctXxdgMCgYEA6IM/NIoGPhk24u2iJGf0rAhHbF/B+6BF +YUgN8Kz61zzODcmWgzDtmNOV/Du+ZF26Q5ar/fNdOeftdqISMZcojLEghUIOG0fv +b7/+WmGQb0O/rthchHTFxv97jRySN/vJqA7EFutDMfPwbeIt5xKEpjoE15Q2X0K3 +Ezpn/eT5aekCgYEA2wPTHrvaap9yw1OEvHRX/GewOSxHEr5ZVaNVsXnFDWM67vyO +Bw4d6K6NOftOO/m2wH0TlQjxhiO/9bbxM/JDbvJJaCNlhJqGX9MWacZALmO3ALYM +dxLc5Z5w3i+2hiwrS4kQ13usEiwpku5XpiNs2ewA1opQk5mTgNiXmsRSUBkCgYEA +w+yUffkweXNVxLLpRjQzACOZy8CE/Tt4Oxk0ZfHdDTG3j/amVbYNABKK+Bh/vqMc +KYf1NfC0Re8hMi4vlooBKUy4UpHuwR5ErK3j7tT3mEpGY1b93r9nSOBPhADnVTVe +H7cRlR55aMZderp0Y5o/HLMXEXFav7q/+fvlVRR09sECgYEA80LefOxtRDo2wjTd +G+GrLCjCjKPXmT2q4J5G0uXNSOB+sZXh42EPl9F/iTUAe8nxDlm+9CnGfh+02Edw +RF76TZmpUwdkyC0KoCwWco3wGB/U9ZY8Amp0ehWdSNLyy8cjuHOUlU04egUaINok +tta0KQibgldQ64lmNE9ct/5igqg= +-----END PRIVATE KEY----- diff --git a/testdata/operatorconfig.yaml b/testdata/operatorconfig.yaml new file mode 100644 index 00000000..851d7a18 --- /dev/null +++ b/testdata/operatorconfig.yaml @@ -0,0 +1,5 @@ +peer: + timeouts: + dbMigration: + jobStart: 30s + jobCompletion: 45s diff --git a/testdata/secret.yaml b/testdata/secret.yaml new file mode 100644 index 00000000..bc0cb1dc --- /dev/null +++ b/testdata/secret.yaml @@ -0,0 +1,2 @@ +apiVersion: v1 +kind: Secret diff --git a/testdata/tls/tls.crt b/testdata/tls/tls.crt new file mode 100644 index 00000000..570de548 --- /dev/null +++ 
b/testdata/tls/tls.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDizCCAnOgAwIBAgIJAO9Ho8OPFKlmMA0GCSqGSIb3DQEBCwUAMFExCzAJBgNV +BAYTAlVTMQswCQYDVQQIDAJOQzEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQ +dHkgTHRkMRIwEAYDVQQDDAlsb2NhbGhvc3QwHhcNMTkwNDIyMTcyMTQwWhcNMjAw +NDIxMTcyMTQwWjBRMQswCQYDVQQGEwJVUzELMAkGA1UECAwCTkMxITAfBgNVBAoM +GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDESMBAGA1UEAwwJbG9jYWxob3N0MIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA306qNWOGxOSu9Er4tBUJTISy +XqR2u70gInAW8T7AZYLJsPKJ7CfIVJjgnLnNaSmcoeZNTyj+uUtCU9a6iH7rPlT6 +8iCL7MzHQOdux1qc9943hWRJ1/DAeLe9TrddeO7xSjzzYedUHqQyfGVhr27rrUKl +HUv+h9CkbpMqIS+LvoXr/fMeJTailwD2OdL57RGCVwSdMbVst93d7v0xPiB7Erg3 +37i/m9BUsxPWcfmnalC1gSTaMWCVg2+4les/bLZ8lR2MGUYnuKxHFII0zvYLsFbm +oOBh8dYroUFzWQi3WN5D2U9XjdzaLWy6AVaj3KqnU9egvyQoRotIh8kyNxajuwID +AQABo2YwZDAdBgNVHQ4EFgQUVCqYcL0sONnMOeLfSIf3wuVCqA4wHwYDVR0jBBgw +FoAUVCqYcL0sONnMOeLfSIf3wuVCqA4wDAYDVR0TBAUwAwEB/zAUBgNVHREEDTAL +gglsb2NhbGhvc3QwDQYJKoZIhvcNAQELBQADggEBACGy3qUgMrFo83oC8EjJr/Vb +kjavLA/NgtxAli2mYLOC0aq/K0xFEQfYMH+vyom/YH6j479gVxqVQZX3BaUmCTR5 +5r0ck8/FkFzzZ5UHbUzcnRLHUdek0v0VkjvFrhqpOpkXXNGAH1JiWlydB4QvcWIr +Jld3ospXy/Vpi3RvxPdjbR3M9O3ASEsbPvWX4L7HQPbQA4ePXWt4MV1M+nZa8OfP +7IfSMddt9b3x4pvPTIFeBPNzm5eSv1uMHFFi+7TvWt0GFuq/ceFZ7jUTcvyYXx2Z +UncC/tZEh08kf3RD45gzLVrBJAiDsIpCCjSzwYOz87wdgnQhJ10kYooqV9M5NbA= +-----END CERTIFICATE----- diff --git a/testdata/tls/tls.key b/testdata/tls/tls.key new file mode 100644 index 00000000..4a7f4c30 --- /dev/null +++ b/testdata/tls/tls.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEwAIBADANBgkqhkiG9w0BAQEFAASCBKowggSmAgEAAoIBAQDfTqo1Y4bE5K70 +Svi0FQlMhLJepHa7vSAicBbxPsBlgsmw8onsJ8hUmOCcuc1pKZyh5k1PKP65S0JT +1rqIfus+VPryIIvszMdA527HWpz33jeFZEnX8MB4t71Ot1147vFKPPNh51QepDJ8 +ZWGvbuutQqUdS/6H0KRukyohL4u+hev98x4lNqKXAPY50vntEYJXBJ0xtWy33d3u +/TE+IHsSuDffuL+b0FSzE9Zx+adqULWBJNoxYJWDb7iV6z9stnyVHYwZRie4rEcU +gjTO9guwVuag4GHx1iuhQXNZCLdY3kPZT1eN3NotbLoBVqPcqqdT16C/JChGi0iH +yTI3FqO7AgMBAAECggEBAKqQoX3KGYSG0AOUyEZ00vd5W+ziXjakDMmBKGT4c2iV +74ySlTqmYGQNqXLMkNNEj3kyOda/D2Fk1LLJDw13NCQWMN+EcJxdBczTeVAYLFE/ +kNT4d1bTc5BBd/2KHYFjsTXtPFGJqj0FWfKeVXD0nltmzkuEYlRGz3IOQl26vVi1 +8Ng+YE5FFiJzadHjILajHy80MqaH5QJbcdTDJ0YiwT20xeT1Y9rZLU9B3E3aXZTg +a4hf+oeSLr3e1Y8ludFL6IYMI1LsJO9xefE40a6rfLF+D/10+Dkrqpw5K4k934bT +ZvIZfh/OWLXwGGRhjsXUiZDBWd4KcaAg83MKREed/iECgYEA9d1eC2WLqpZO+nef +8w9eAGJrzmF9ZBdGuOUOJBhx1SIDD80wuaEPD9LFpVqzm+pDFWX6tkxUMG8pa0D7 +XU/SrmkIV535i5FnevIwJtrFRy2KZPvdm8WkuP6AoBwnvKhApu6jBYKIWTXz5KwC +03qRHc38v6icT0GdOVJsctXxdgMCgYEA6IM/NIoGPhk24u2iJGf0rAhHbF/B+6BF +YUgN8Kz61zzODcmWgzDtmNOV/Du+ZF26Q5ar/fNdOeftdqISMZcojLEghUIOG0fv +b7/+WmGQb0O/rthchHTFxv97jRySN/vJqA7EFutDMfPwbeIt5xKEpjoE15Q2X0K3 +Ezpn/eT5aekCgYEA2wPTHrvaap9yw1OEvHRX/GewOSxHEr5ZVaNVsXnFDWM67vyO +Bw4d6K6NOftOO/m2wH0TlQjxhiO/9bbxM/JDbvJJaCNlhJqGX9MWacZALmO3ALYM +dxLc5Z5w3i+2hiwrS4kQ13usEiwpku5XpiNs2ewA1opQk5mTgNiXmsRSUBkCgYEA +w+yUffkweXNVxLLpRjQzACOZy8CE/Tt4Oxk0ZfHdDTG3j/amVbYNABKK+Bh/vqMc +KYf1NfC0Re8hMi4vlooBKUy4UpHuwR5ErK3j7tT3mEpGY1b93r9nSOBPhADnVTVe +H7cRlR55aMZderp0Y5o/HLMXEXFav7q/+fvlVRR09sECgYEA80LefOxtRDo2wjTd +G+GrLCjCjKPXmT2q4J5G0uXNSOB+sZXh42EPl9F/iTUAe8nxDlm+9CnGfh+02Edw +RF76TZmpUwdkyC0KoCwWco3wGB/U9ZY8Amp0ehWdSNLyy8cjuHOUlU04egUaINok +tta0KQibgldQ64lmNE9ct/5igqg= +-----END PRIVATE KEY----- diff --git a/tools/tools.go b/tools/tools.go new file mode 100644 index 00000000..8b982e01 --- /dev/null +++ b/tools/tools.go @@ -0,0 +1,30 @@ +//go:build tools +// +build tools + +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * 
Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package tools + +import ( + _ "github.com/maxbrunsfeld/counterfeiter/v6" + _ "k8s.io/code-generator" +) + +// This file imports packages that are used when running go generate, or used +// during the development process but not otherwise depended on by built code. diff --git a/version/fabricversion.go b/version/fabricversion.go new file mode 100644 index 00000000..e58ae15b --- /dev/null +++ b/version/fabricversion.go @@ -0,0 +1,100 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package version + +import ( + "strings" +) + +const ( + // Fabric versions + V1 = "1" + V1_0_0 = "1.0.0" + V1_4_0 = "1.4.0" + V1_4_6 = "1.4.6" + V1_4_7 = "1.4.7" + V1_4_8 = "1.4.8" + V1_4_9 = "1.4.9" + V1_5_3 = "1.5.3" + V2 = "2" + V2_0_0 = "2.0.0" + V2_0_1 = "2.0.1" + V2_1_0 = "2.1.0" + V2_1_1 = "2.1.1" + V2_2_0 = "2.2.0" + V2_2_1 = "2.2.1" + V2_2_3 = "2.2.3" + V2_2_4 = "2.2.4" + V2_2_5 = "2.2.5" + + V2_4_1 = "2.4.1" + + V1_4 = "V1.4" + + Unsupported = "unsupported" +) + +// OldFabricVersionsLookup map contains old fabric versions keyed +// by image tag. Used to set the fabric version of migrated instances +// that don't have fabric version set in their specs. +// This should not contain newer fabric versions as instances with newer +// fabric versions should have fabric version set in their spec. +var OldFabricVersionsLookup = map[string]interface{}{ + "1.4.2": nil, + "1.4.3": nil, + "1.4.4": nil, + "1.4.5": nil, + "1.4.6": nil, + "V1.4": nil, + "unsupported": nil, +} + +// GetFabricVersionFrom extracts fabric version from image tag in the format: -- +func GetFabricVersionFrom(imageTag string) string { + tagItems := strings.Split(imageTag, "-") + if len(tagItems) != 3 { + // Newer tags use sha256 digests, from which + // versions cannot be extracted. + return "" + } + + fabVersion := tagItems[0] + return fabVersion +} + +// GetOldFabricVersionFrom is only to be used when we need to find the +// fabric version of a migrated instance where instance.Spec.FabricVersion +// was not set previously. 
+func GetOldFabricVersionFrom(imageTag string) string { + version := GetFabricVersionFrom(imageTag) + + _, found := OldFabricVersionsLookup[version] + if !found { + return Unsupported + } + + return version +} + +// IsMigratedFabricVersion returns true if the given fabric version +// was set during migration to 2.5.2 or above +func IsMigratedFabricVersion(fabricVersion string) bool { + _, found := OldFabricVersionsLookup[fabricVersion] + return found +} diff --git a/version/version.go b/version/version.go new file mode 100644 index 00000000..066aabfc --- /dev/null +++ b/version/version.go @@ -0,0 +1,252 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package version + +import ( + "fmt" + "strconv" + "strings" +) + +const ( + // DEPRECATED: operator versions for IBP logic + V210 = "2.1.0" + V212 = "2.1.2" + V213 = "2.1.3" + V250 = "2.5.0" + V251 = "2.5.1" + V252 = "2.5.2" + V253 = "2.5.3" + IBPOperator = "2.5.3" + + // IBM Support for Hyperledger Fabric product version + V100 = "1.0.0" + Operator = "1.0.0" +) + +type String string + +func (s String) Equal(new string) bool { + oldVersion := newVersion(string(s)) + newVersion := newVersion(new) + + return oldVersion.equal(newVersion) + +} +func (s String) EqualWithoutTag(new string) bool { + oldVersion := newVersion(string(s)) + newVersion := newVersion(new) + + return oldVersion.equalWithoutTag(newVersion) + +} + +func (s String) GreaterThan(new string) bool { + oldVersion := newVersion(string(s)) + newVersion := newVersion(new) + + return oldVersion.greaterThan(newVersion) +} + +func (s String) LessThan(new string) bool { + oldVersion := newVersion(string(s)) + newVersion := newVersion(new) + + return oldVersion.lessThan(newVersion) + +} + +type Version struct { + Major int `json:"major"` + Minor int `json:"minor"` + Fixpack int `json:"fixpack"` + Tag int `json:"tag"` +} + +func GetMajorReleaseVersion(version string) string { + version = stripVersionPrefix(version) + v := newVersion(version) + switch v.Major { + case 2: + return V2 + case 1: + return V1 + default: + return V1 + } +} + +func stripVersionPrefix(version string) string { + version = strings.ToLower(version) + if strings.HasPrefix(version, "v") { + version = strings.TrimPrefix(version, "v") + } + return version +} + +func newVersion(version string) *Version { + v := stringToIntList(version) + + switch len(v) { + case 1: + return &Version{ + Major: v[0], + } + case 2: + return &Version{ + Major: v[0], + Minor: v[1], + } + case 3: + return &Version{ + Major: v[0], + Minor: v[1], + Fixpack: v[2], + } + case 4: + return &Version{ + Major: v[0], + Minor: v[1], + Fixpack: v[2], + Tag: v[3], + } + } + + return &Version{} +} + +func stringToIntList(version string) []int { + var tag string + + // If version of format major.minor.fixpack-tag, extract tag first + if strings.Contains(version, "-") { + vList := strings.Split(version, "-") + version = 
vList[0] // major.minor.fixpack + tag = vList[1] // tag + } + + strList := strings.Split(version, ".") + if tag != "" { + strList = append(strList, tag) + } + + intList := []int{} + for _, str := range strList { + num, err := strconv.Atoi(str) + if err != nil { + // No-op: strconv.Atoi() returns 0 with the error + } + intList = append(intList, num) + } + + return intList +} + +func (v *Version) equal(newVersion *Version) bool { + if newVersion == nil { + return false + } + + if v.Major == newVersion.Major { + if v.Minor == newVersion.Minor { + if v.Fixpack == newVersion.Fixpack { + if v.Tag == newVersion.Tag { + return true + } + } + } + } + + return false +} + +func (v *Version) equalWithoutTag(newVersion *Version) bool { + if newVersion == nil { + return false + } + + if v.Major == newVersion.Major { + if v.Minor == newVersion.Minor { + if v.Fixpack == newVersion.Fixpack { + return true + } + } + } + + return false +} + +func (v *Version) lessThan(newVersion *Version) bool { + if v.Major < newVersion.Major { + return true + } else if v.Major > newVersion.Major { + return false + } + + if v.Minor < newVersion.Minor { + return true + } else if v.Minor > newVersion.Minor { + return false + } + + if v.Fixpack < newVersion.Fixpack { + return true + } else if v.Fixpack > newVersion.Fixpack { + return false + } + + if v.Tag < newVersion.Tag { + return true + } + + return false +} + +func (v *Version) greaterThan(newVersion *Version) bool { + if v.Major > newVersion.Major { + return true + } else if v.Major < newVersion.Major { + return false + } + + if v.Minor > newVersion.Minor { + return true + } else if v.Minor < newVersion.Minor { + return false + } + + if v.Fixpack > newVersion.Fixpack { + return true + } else if v.Fixpack < newVersion.Fixpack { + return false + } + + if v.Tag > newVersion.Tag { + return true + } + + return false +} + +func (v *Version) String() string { + if v != nil { + return fmt.Sprintf("%d.%d.%d-%d", v.Major, v.Minor, v.Fixpack, v.Tag) + } + return "nil" +} diff --git a/version/version_suite_test.go b/version/version_suite_test.go new file mode 100644 index 00000000..ca689e55 --- /dev/null +++ b/version/version_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package version_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestVersion(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Version Suite") +} diff --git a/version/version_test.go b/version/version_test.go new file mode 100644 index 00000000..08bc785b --- /dev/null +++ b/version/version_test.go @@ -0,0 +1,167 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package version_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/IBM-Blockchain/fabric-operator/version" +) + +var _ = Describe("Version", func() { + + Context("get fabric version from", func() { + It("returns version from image tag", func() { + fabricVersion := version.GetFabricVersionFrom("1.4.3-12345-amd64") + Expect(fabricVersion).To(Equal("1.4.3")) + }) + + It("returns empty string if image tag is a sha256 digest", func() { + fabricVersion := version.GetFabricVersionFrom("sha256:2037c532f6c823667baed5af248c01c941b2344c2a939e451b81ea0e03938243") + Expect(fabricVersion).To(Equal("")) + }) + }) + + Context("get old fabric version from", func() { + It("returns version from image tag", func() { + fabricVersion := version.GetOldFabricVersionFrom("1.4.3-12345-amd64") + Expect(fabricVersion).To(Equal("1.4.3")) + }) + + It("returns 'unsupported' if given an old image tag with a version not found in the lookup table", func() { + fabricVersion := version.GetOldFabricVersionFrom("1.4.1-12345-amd64") + Expect(fabricVersion).To(Equal("unsupported")) + }) + }) + + Context("is migrated fabric version", func() { + It("returns true if version is found in old fabric versions lookup map", func() { + migrated := version.IsMigratedFabricVersion("1.4.6") + Expect(migrated).To(Equal(true)) + }) + + It("returns true if version is 'unsupported'", func() { + migrated := version.IsMigratedFabricVersion("unsupported") + Expect(migrated).To(Equal(true)) + }) + + It("returns false if version is not found in old fabric versions lookup map", func() { + migrated := version.IsMigratedFabricVersion("1.4.9-4") + Expect(migrated).To(Equal(false)) + }) + }) + + Context("version string", func() { + var ( + V147 version.String + V147_2 version.String + V225_5 version.String + V241_1 version.String + ) + + BeforeEach(func() { + V147 = version.String("1.4.7") + V147_2 = version.String("1.4.7-2") + V225_5 = version.String("2.2.5-5") + V241_1 = version.String("2.4.1-1") + }) + + Context("equal", func() { + It("returns 1.4.7 == 1.4.7 as true", func() { + equal := V147.Equal("1.4.7") + Expect(equal).To(Equal(true)) + }) + + It("returns 1.4.7 == 1.4.6 as false", func() { + equal := V147.Equal("1.4.6") + Expect(equal).To(Equal(false)) + }) + + It("returns 1.4.7 == 1.4.7-1 as false", func() { + equal := V147.Equal("1.4.7-1") + Expect(equal).To(Equal(false)) + }) + }) + + Context("greater than", func() { + It("returns 1.4.7 > 1.4.7 as false", func() { + equal := V147.GreaterThan("1.4.7") + Expect(equal).To(Equal(false)) + }) + + It("returns 1.4.7 > 1.4.6 as true", func() { + equal := V147.GreaterThan("1.4.6") + Expect(equal).To(Equal(true)) + }) + + It("returns 1.4.7 > 1.4.7-1 as false", func() { + equal := V147.GreaterThan("1.4.7-1") + Expect(equal).To(Equal(false)) + }) + + It("returns 1.4.7-2 > 1.4.7-1 as true", func() { + equal := V147_2.GreaterThan("1.4.7-1") + Expect(equal).To(Equal(true)) + }) + + It("returns 2.2.5-5 > 2.4.1-1 as false", func() { + equal := V225_5.GreaterThan("2.4.1-1") + Expect(equal).To(Equal(false)) + }) + + It("returns 2.4.1-1 > 2.2.5-5 as true", 
func() { + equal := V241_1.GreaterThan("2.2.5-5") + Expect(equal).To(Equal(true)) + }) + }) + + Context("less than", func() { + It("returns 1.4.7 < 1.4.7 as false", func() { + equal := V147.LessThan("1.4.7") + Expect(equal).To(Equal(false)) + }) + + It("returns 1.4.7 < 1.4.6 as false", func() { + equal := V147.LessThan("1.4.6") + Expect(equal).To(Equal(false)) + }) + + It("returns 1.4.7 < 1.4.7-1 as true", func() { + equal := V147.LessThan("1.4.7-1") + Expect(equal).To(Equal(true)) + }) + + It("returns 1.4.7-2 < 1.4.7-1 as false", func() { + equal := V147_2.LessThan("1.4.7-1") + Expect(equal).To(Equal(false)) + }) + + It("returns 2.4.1-1 < 2.2.5-5 as false", func() { + equal := V241_1.LessThan("2.2.5-5") + Expect(equal).To(Equal(false)) + }) + + It("returns 2.2.5-5 < 2.4.1-1 as true", func() { + equal := V225_5.LessThan("2.4.1-1") + Expect(equal).To(Equal(true)) + }) + }) + }) +})
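
A minimal usage sketch for the version package added above, assuming only the exported helpers shown in version/version.go and version/fabricversion.go and the import path used by the test suite; the surrounding program names are illustrative, not part of the patch:

package main

import (
	"fmt"

	"github.com/IBM-Blockchain/fabric-operator/version"
)

func main() {
	// version.String compares versions of the form major.minor.fixpack[-tag].
	current := version.String("1.4.7-2")
	fmt.Println(current.GreaterThan("1.4.7-1")) // true: tag is compared last
	fmt.Println(current.LessThan("2.2.5-5"))    // true: major version differs

	// Image tags such as "1.4.3-12345-amd64" (version-build-arch) yield the
	// Fabric version; sha256 digests cannot be parsed and return "".
	fmt.Println(version.GetFabricVersionFrom("1.4.3-12345-amd64")) // "1.4.3"

	// Old tags whose version is not in OldFabricVersionsLookup map to "unsupported".
	fmt.Println(version.GetOldFabricVersionFrom("1.4.1-12345-amd64")) // "unsupported"
}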